name: string, length 12-178
code_snippet: string, length 8-36.5k
score: float64, range 3.26-3.68
framework_ConnectorMap_getComponentConnectors
/** * Gets all registered {@link ComponentConnector} instances. * * @return An array of all registered {@link ComponentConnector} instances * * @deprecated As of 7.0.1, use {@link #getComponentConnectorsAsJsArray()} * for better performance. */ @Deprecated public ComponentConnector[] getComponentConnectors() { List<ComponentConnector> result = new ArrayList<>(); JsArrayObject<ServerConnector> connectors = getConnectorsAsJsArray(); int size = connectors.size(); for (int i = 0; i < size; i++) { ServerConnector connector = connectors.get(i); if (connector instanceof ComponentConnector) { result.add((ComponentConnector) connector); } } return result.toArray(new ComponentConnector[result.size()]); }
3.68
hbase_KeyValue_create
/** * Create a KeyValue reading <code>length</code> from <code>in</code>. * @param length length of the Key * @param in Input to read from * @return Created KeyValue OR if we find a length of zero, we will return null which can be * useful for marking a stream as done. * @throws IOException if any IO error happens */ public static KeyValue create(int length, final DataInput in) throws IOException { if (length <= 0) { if (length == 0) { return null; } throw new IOException("Failed read " + length + " bytes, stream corrupt?"); } // This is how the old Writables.readFrom used to deserialize. Didn't even vint. byte[] bytes = new byte[length]; in.readFully(bytes); return new KeyValue(bytes, 0, length); }
3.68
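The null-on-zero-length contract described above is typically used as an end-of-stream marker. A minimal sketch, assuming the writer emitted an int length before each serialized KeyValue and a trailing zero length; the file path and writer format here are hypothetical:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;

public class KeyValueStreamReader {
    public static void readAll(String path) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            KeyValue kv;
            // create() returns null for a zero length, which marks the stream as done
            while ((kv = KeyValue.create(in.readInt(), in)) != null) {
                System.out.println(kv);
            }
        }
    }
}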
morf_HumanReadableStatementHelper_generateUpdateStatementString
/** * Generates a human-readable description of a data update operation. * * @param statement the data upgrade statement to describe * @return a string containing the human-readable description of the operation */ private static String generateUpdateStatementString(final UpdateStatement statement) { final StringBuilder sb = new StringBuilder(); if (statement.getWhereCriterion() == null) { sb.append(String.format("Update records in %s", statement.getTable().getName())); } else { sb.append(String.format("Update %s%s", statement.getTable().getName(), generateWhereClause(statement.getWhereCriterion()))); } for (AliasedField field : statement.getFields()) { sb.append(generateAliasedFieldAssignmentString(field)); } return sb.toString(); }
3.68
hadoop_MetricsCache_get
/** * Get the cached record * @param name of the record * @param tags of the record * @return the cached record or null */ public Record get(String name, Collection<MetricsTag> tags) { RecordCache rc = map.get(name); if (rc == null) return null; return rc.get(tags); }
3.68
hbase_Scan_setBatch
/** * Set the maximum number of cells to return for each call to next(). Callers should be aware that * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow * partial results, the number of cells in each Result must be equal to your batch setting unless it * is the last Result for the current row. So this method is helpful in paging queries. If you just * want to prevent OOM at the client, using setAllowPartialResults(true) is better. * @param batch the maximum number of values * @see Result#mayHaveMoreCellsInRow() */ public Scan setBatch(int batch) { if (this.hasFilter() && this.filter.hasFilterRow()) { throw new IncompatibleFilterException( "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow"); } this.batch = batch; return this; }
3.68
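A minimal usage sketch of the paging behaviour described above: capping each Result at a handful of cells for very wide rows. The batch and caching values are arbitrary, and setCaching is shown only to contrast row-level caching with cell-level batching:

import org.apache.hadoop.hbase.client.Scan;

public class WideRowScanExample {
    public static Scan wideRowScan() {
        Scan scan = new Scan();
        scan.setBatch(5);     // return at most 5 cells per call to next()
        scan.setCaching(100); // rows fetched per RPC; independent of the batch setting
        return scan;
    }
}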
flink_DeltaIteration_getSolutionSet
/** * Gets the solution set of the delta iteration. The solution set represents the state that is * kept across iterations. * * @return The solution set of the delta iteration. */ public SolutionSetPlaceHolder getSolutionSet() { return solutionSetPlaceholder; }
3.68
flink_TaskExecutor_tryLoadLocalAllocationSnapshots
/** * This method tries to repopulate the {@link JobTable} and {@link TaskSlotTable} from the local * filesystem in a best-effort manner. */ private void tryLoadLocalAllocationSnapshots() { Collection<SlotAllocationSnapshot> slotAllocationSnapshots = slotAllocationSnapshotPersistenceService.loadAllocationSnapshots(); log.debug("Recovered slot allocation snapshots {}.", slotAllocationSnapshots); final Set<AllocationID> allocatedSlots = new HashSet<>(); for (SlotAllocationSnapshot slotAllocationSnapshot : slotAllocationSnapshots) { try { allocateSlotForJob( slotAllocationSnapshot.getJobId(), slotAllocationSnapshot.getSlotID(), slotAllocationSnapshot.getAllocationId(), slotAllocationSnapshot.getResourceProfile(), slotAllocationSnapshot.getJobTargetAddress()); } catch (SlotAllocationException e) { log.debug("Cannot reallocate restored slot {}.", slotAllocationSnapshot, e); } allocatedSlots.add(slotAllocationSnapshot.getAllocationId()); } localStateStoresManager.retainLocalStateForAllocations(allocatedSlots); }
3.68
framework_VDragAndDropWrapper_hookHtml5DragStart
/** * @since 7.2 */ protected void hookHtml5DragStart(Element el) { hookHtml5DragStart(DOM.asOld(el)); }
3.68
hbase_RegionServerObserver_preClearCompactionQueues
/** * This will be called before clearing compaction queues * @param ctx the environment to interact with the framework and region server. */ default void preClearCompactionQueues( final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException { }
3.68
flink_StateMachineExample_rpsFromSleep
// Used for backwards compatibility to convert legacy 'sleep' parameter to records per second. private static double rpsFromSleep(int sleep, int parallelism) { return (1000d / sleep) * parallelism; }
3.68
dubbo_SingleRouterChain_setInvokers
/** * Notify the router chain of the initial addresses from the registry the first time they are received. * Notify whenever the addresses in the registry change. */ public void setInvokers(BitList<Invoker<T>> invokers) { this.invokers = (invokers == null ? BitList.emptyList() : invokers); routers.forEach(router -> router.notify(this.invokers)); stateRouters.forEach(router -> router.notify(this.invokers)); }
3.68
dubbo_ReferenceConfig_getInvoker
/** * Just for testing. * * @return the invoker */ @Deprecated @Transient public Invoker<?> getInvoker() { return invoker; }
3.68
zxing_CalendarResultHandler_addCalendarEvent
/** * Sends an intent to create a new calendar event by prepopulating the Add Event UI. Older * versions of the system have a bug where the event title will not be filled out. * * @param summary A description of the event * @param start The start time * @param allDay if true, event is considered to be all day starting from start time * @param end The end time (optional; can be < 0 if not specified) * @param location a text description of the event location * @param description a text description of the event itself * @param attendees attendees to invite */ private void addCalendarEvent(String summary, long start, boolean allDay, long end, String location, String description, String[] attendees) { Intent intent = new Intent(Intent.ACTION_INSERT); intent.setType("vnd.android.cursor.item/event"); intent.putExtra("beginTime", start); if (allDay) { intent.putExtra("allDay", true); } if (end < 0L) { if (allDay) { // + 1 day end = start + 24 * 60 * 60 * 1000; } else { end = start; } } intent.putExtra("endTime", end); intent.putExtra("title", summary); intent.putExtra("eventLocation", location); intent.putExtra("description", description); if (attendees != null) { intent.putExtra(Intent.EXTRA_EMAIL, attendees); // Documentation says this is either a String[] or comma-separated String, which is right? } try { // Do this manually at first rawLaunchIntent(intent); } catch (ActivityNotFoundException anfe) { Log.w(TAG, "No calendar app available that responds to " + Intent.ACTION_INSERT); // For calendar apps that don't like "INSERT": intent.setAction(Intent.ACTION_EDIT); launchIntent(intent); // Fail here for real if nothing can handle it } }
3.68
hudi_InternalSchemaUtils_collectRenameCols
/** * Try to find all renamed cols between oldSchema and newSchema. * * @param oldSchema oldSchema * @param newSchema newSchema, which was modified from oldSchema * @return renameCols Map. (k, v) -> (colNameFromNewSchema, colNameLastPartFromOldSchema) */ public static Map<String, String> collectRenameCols(InternalSchema oldSchema, InternalSchema newSchema) { List<String> colNamesFromWriteSchema = oldSchema.getAllColsFullName(); return colNamesFromWriteSchema.stream().filter(f -> { int fieldIdFromWriteSchema = oldSchema.findIdByName(f); // try to find the cols which have the same id but a different colName return newSchema.getAllIds().contains(fieldIdFromWriteSchema) && !newSchema.findFullName(fieldIdFromWriteSchema).equalsIgnoreCase(f); }).collect(Collectors.toMap(e -> newSchema.findFullName(oldSchema.findIdByName(e)), e -> { int lastDotIndex = e.lastIndexOf("."); return e.substring(lastDotIndex == -1 ? 0 : lastDotIndex + 1); })); }
3.68
flink_RemoteInputChannel_checkpointStarted
/** * Spills all queued buffers on checkpoint start. If the barrier has already been received (and * reordered), spill only the overtaken buffers. */ public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException { synchronized (receivedBuffers) { if (barrier.getId() < lastBarrierId) { throw new CheckpointException( String.format( "Sequence number for checkpoint %d is not known (it has likely been overwritten by a newer checkpoint %d)", barrier.getId(), lastBarrierId), CheckpointFailureReason .CHECKPOINT_SUBSUMED); // currently, at most one active unaligned // checkpoint is possible } else if (barrier.getId() > lastBarrierId) { // This channel has received an obsolete barrier, older than the checkpointId // which we are processing right now, and we should ignore that obsolete checkpoint // barrier sequence number. resetLastBarrier(); } channelStatePersister.startPersisting( barrier.getId(), getInflightBuffersUnsafe(barrier.getId())); } }
3.68
hadoop_SnappyCodec_getDecompressorType
/** * Get the type of {@link Decompressor} needed by this {@link CompressionCodec}. * * @return the type of decompressor needed by this codec. */ @Override public Class<? extends Decompressor> getDecompressorType() { return SnappyDecompressor.class; }
3.68
hbase_QuotaSettingsFactory_limitTableSpace
/** * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table * to the given size in bytes. When the space usage is exceeded by the table, the provided * {@link SpaceViolationPolicy} is enacted on the table. * @param tableName The name of the table on which the quota should be applied. * @param sizeLimit The limit of a table's size in bytes. * @param violationPolicy The action to take when the quota is exceeded. * @return A {@link QuotaSettings} object. */ public static QuotaSettings limitTableSpace(final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) { return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy); }
3.68
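A sketch of how such a quota is usually applied through the Admin API. The table name, size and violation policy below are placeholders, and the Admin instance is assumed to come from an existing Connection:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

public class TableSpaceQuotaExample {
    static void applyQuota(Admin admin) throws IOException {
        // cap 'demo_table' at 1 GB and reject new inserts once the quota is exceeded
        QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
                TableName.valueOf("demo_table"),
                1024L * 1024 * 1024,
                SpaceViolationPolicy.NO_INSERTS);
        admin.setQuota(settings);
    }
}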
hmily_HmilySQLComputeUtils_executeQuery
/** * Execute query. * * @param connection connection * @param sql sql * @param parameters parameters * @return records * @throws SQLException SQL exception */ public static Collection<Map<String, Object>> executeQuery(final Connection connection, final String sql, final List<Object> parameters) throws SQLException { Collection<Map<String, Object>> result = new LinkedList<>(); try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) { int parameterIndex = 1; for (Object each : parameters) { preparedStatement.setObject(parameterIndex, each); parameterIndex++; } ResultSet resultSet = preparedStatement.executeQuery(); ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); while (resultSet.next()) { Map<String, Object> record = new LinkedHashMap<>(); for (int columnIndex = 1; columnIndex <= resultSetMetaData.getColumnCount(); columnIndex++) { record.put(resultSetMetaData.getColumnLabel(columnIndex), resultSet.getObject(columnIndex)); } result.add(record); } } return result; }
3.68
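A hypothetical caller for the helper above, running a parameterized SELECT and printing each row as a column-label-to-value map. The JDBC URL and SQL are made up, and the import of HmilySQLComputeUtils is omitted because its package depends on the Hmily version:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;

public class ExecuteQueryDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            // one '?' placeholder per entry in the parameter list
            Collection<Map<String, Object>> rows = HmilySQLComputeUtils.executeQuery(
                    conn, "SELECT id, name FROM account WHERE id = ?", Arrays.asList(1L));
            rows.forEach(System.out::println);
        }
    }
}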
hudi_SparkBulkInsertHelper_bulkInsert
/** * Do bulk insert using WriteHandleFactory from the partitioner (i.e., partitioner.getWriteHandleFactory) */ public HoodieData<WriteStatus> bulkInsert(HoodieData<HoodieRecord<T>> inputRecords, String instantTime, HoodieTable<T, HoodieData<HoodieRecord<T>>, HoodieData<HoodieKey>, HoodieData<WriteStatus>> table, HoodieWriteConfig config, boolean performDedupe, BulkInsertPartitioner partitioner, boolean useWriterSchema, int parallelism) { return bulkInsert(inputRecords, instantTime, table, config, performDedupe, partitioner, useWriterSchema, parallelism, null); }
3.68
querydsl_BeanPath_createComparable
/** * Create a new Comparable typed path * * @param <A> * @param property property name * @param type property type * @return property path */ @SuppressWarnings("unchecked") protected <A extends Comparable> ComparablePath<A> createComparable(String property, Class<? super A> type) { return add(new ComparablePath<A>((Class) type, forProperty(property))); }
3.68
flink_DeltaIterationBase_setSolutionSetUnManaged
/** * Sets whether to keep the solution set in managed memory (safe against heap exhaustion) or * unmanaged memory (objects on heap). * * @param solutionSetUnManaged True to keep the solution set in unmanaged memory, false to keep * it in managed memory. * @see #isSolutionSetUnManaged() */ public void setSolutionSetUnManaged(boolean solutionSetUnManaged) { this.solutionSetUnManaged = solutionSetUnManaged; }
3.68
pulsar_AbstractHierarchicalLedgerManager_asyncProcessLedgersInSingleNode
/** * Process ledgers in a single zk node. * * <p> * For each ledger found in this zk node, processor#process(ledgerId) will be triggered * to process a specific ledger. After all ledgers have been processed, the finalCb will * be called with the provided context object. The RC passed to finalCb is decided by: * <ul> * <li> If all ledgers are processed successfully, successRc will be passed. * <li> If any ledger fails to be processed, failureRc will be passed. * </ul> * </p> * * @param path * Zk node path to store ledgers * @param processor * Processor provided to process ledger * @param finalCb * Callback object when all ledgers are processed * @param ctx * Context object passed to finalCb * @param successRc * RC passed to finalCb when all ledgers are processed successfully * @param failureRc * RC passed to finalCb when any ledger fails to be processed */ protected void asyncProcessLedgersInSingleNode( final String path, final BookkeeperInternalCallbacks.Processor<Long> processor, final AsyncCallback.VoidCallback finalCb, final Object ctx, final int successRc, final int failureRc) { store.getChildren(path) .thenAccept(ledgerNodes -> { Set<Long> activeLedgers = HierarchicalLedgerUtils.ledgerListToSet(ledgerNodes, ledgerRootPath, path); if (log.isDebugEnabled()) { log.debug("Processing ledgers: {}", activeLedgers); } // no ledgers found, return directly if (activeLedgers.isEmpty()) { finalCb.processResult(successRc, null, ctx); return; } BookkeeperInternalCallbacks.MultiCallback mcb = new BookkeeperInternalCallbacks.MultiCallback(activeLedgers.size(), finalCb, ctx, successRc, failureRc); // start loop over all ledgers scheduler.submit(() -> { for (Long ledger : activeLedgers) { processor.process(ledger, mcb); } }); }).exceptionally(ex -> { finalCb.processResult(failureRc, null, ctx); return null; }); }
3.68
hbase_CompactionConfiguration_getCompactionRatioOffPeak
/** Returns Off peak Ratio used for compaction */ public double getCompactionRatioOffPeak() { return offPeakCompactionRatio; }
3.68
hadoop_CacheStats_getCacheCapacity
/** * Get the maximum amount of bytes we can cache. This is a constant. */ public long getCacheCapacity() { return maxBytes; }
3.68
pulsar_Record_getTopicName
/** * If the record originated from a topic, report the topic name. */ default Optional<String> getTopicName() { return Optional.empty(); }
3.68
flink_Configuration_getFloat
/** * Returns the value associated with the given config option as a float. If no value is mapped * under any key of the option, it returns the specified default instead of the option's default * value. * * @param configOption The configuration option * @param overrideDefault The value to return if no value was mapped for any key of the option * @return the configured value associated with the given config option, or the overrideDefault */ @PublicEvolving public float getFloat(ConfigOption<Float> configOption, float overrideDefault) { return getOptional(configOption).orElse(overrideDefault); }
3.68
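A small sketch of the override-default behaviour described above. The option key and values are hypothetical; the point is that the explicit override wins over the option's own default when no value has been set:

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class GetFloatDemo {
    // hypothetical option with a default of 0.5f
    static final ConfigOption<Float> RATIO =
            ConfigOptions.key("taskmanager.demo.ratio").floatType().defaultValue(0.5f);

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // nothing was set for this option, so the override (0.75f) is returned instead of 0.5f
        System.out.println(conf.getFloat(RATIO, 0.75f)); // 0.75
    }
}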
hadoop_AbfsConfiguration_shouldTrackLatency
/** * Whether {@code AbfsClient} should track and send latency info back to storage servers. * * @return a boolean indicating whether latency should be tracked. */ public boolean shouldTrackLatency() { return this.trackLatency; }
3.68
hadoop_ErasureCodingPolicyState_read
/** Read from in. */ public static ErasureCodingPolicyState read(DataInput in) throws IOException { return fromValue(in.readByte()); }
3.68
flink_TemplateUtils_findInputOnlyTemplates
/** Hints that only declare an input. */ static Set<FunctionSignatureTemplate> findInputOnlyTemplates( Set<FunctionTemplate> global, Set<FunctionTemplate> local, Function<FunctionTemplate, FunctionResultTemplate> accessor) { return Stream.concat(global.stream(), local.stream()) .filter(t -> t.getSignatureTemplate() != null && accessor.apply(t) == null) .map(FunctionTemplate::getSignatureTemplate) .collect(Collectors.toCollection(LinkedHashSet::new)); }
3.68
hadoop_AzureBlobFileSystem_removeDefaultAcl
/** * Removes all default ACL entries from files and directories. * * @param path Path to modify * @throws IOException if an ACL could not be modified */ @Override public void removeDefaultAcl(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.removeDefaultAcl path: {}", path); TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.REMOVE_DEFAULT_ACL, true, tracingHeaderFormat, listener); if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "removeDefaultAcl is only supported by storage accounts with the " + "hierarchical namespace enabled."); } Path qualifiedPath = makeQualified(path); try { abfsStore.removeDefaultAcl(qualifiedPath, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } }
3.68
hadoop_QueueCapacityUpdateContext_getUpdateWarnings
/** * Returns all update warnings that occurred in this update phase. * @return update warnings */ public List<QueueUpdateWarning> getUpdateWarnings() { return warnings; }
3.68
hbase_CompactSplit_shutdownLongCompactions
/** * Shutdown the long compaction thread pool. Should only be used in unit tests to prevent the long * compaction thread pool from stealing jobs from the short compaction queue. */ void shutdownLongCompactions() { this.longCompactions.shutdown(); }
3.68
flink_FlinkRexBuilder_areAssignable
/** Copied from the {@link RexBuilder} to fix the {@link RexBuilder#makeIn}. */ private boolean areAssignable(RexNode arg, List<? extends RexNode> bounds) { for (RexNode bound : bounds) { if (!SqlTypeUtil.inSameFamily(arg.getType(), bound.getType()) && !(arg.getType().isStruct() && bound.getType().isStruct())) { return false; } } return true; }
3.68
pulsar_LoadManagerShared_getNamespaceNameFromBundleName
// From a full bundle name, extract the namespace name. public static String getNamespaceNameFromBundleName(String bundleName) { // the bundle format is property/cluster/namespace/0x00000000_0xFFFFFFFF int pos = bundleName.lastIndexOf('/'); checkArgument(pos != -1); return bundleName.substring(0, pos); }
3.68
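A plain-Java illustration of the split performed above; the bundle value is made up but follows the property/cluster/namespace/range format mentioned in the comment:

public class BundleNameDemo {
    public static void main(String[] args) {
        String bundle = "my-prop/us-west/my-ns/0x00000000_0xFFFFFFFF";
        // same logic as above: drop the trailing "/<range>" segment
        String namespace = bundle.substring(0, bundle.lastIndexOf('/'));
        System.out.println(namespace); // my-prop/us-west/my-ns
    }
}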
querydsl_SQLExpressions_lag
/** * expr evaluated at the row that is one row before the current row within the partition * * @param expr expression * @return lag(expr) */ public static <T> WindowOver<T> lag(Expression<T> expr) { return new WindowOver<T>(expr.getType(), SQLOps.LAG, expr); }
3.68
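A sketch of how lag() is typically combined with a window specification. The paths below are ad-hoc stand-ins for generated query types, and the over()/partitionBy()/orderBy() chain is assumed to be available on the returned WindowOver:

import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.NumberPath;
import com.querydsl.core.types.dsl.StringPath;
import com.querydsl.sql.SQLExpressions;

public class LagDemo {
    public static void main(String[] args) {
        NumberPath<Integer> salary = Expressions.numberPath(Integer.class, "salary");
        StringPath department = Expressions.stringPath("department");
        NumberPath<Integer> id = Expressions.numberPath(Integer.class, "id");
        // previous row's salary within each department, ordered by id
        System.out.println(
                SQLExpressions.lag(salary).over().partitionBy(department).orderBy(id));
    }
}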
framework_AbstractMedia_setAutoplay
/** * Sets whether the media is to automatically start playback when enough * data has been loaded. * * @param autoplay */ public void setAutoplay(boolean autoplay) { getState().autoplay = autoplay; }
3.68
rocketmq-connect_DeadLetterQueueConfig_dlqTopicName
/** * Get the DLQ topic name. * * @return the DLQ topic name, or an empty string if not configured */ public String dlqTopicName() { return config.getString(DLQ_TOPIC_NAME_CONFIG, ""); }
3.68
framework_DefaultEditorEventHandler_getDeltaFromKeyDownEvent
/** * Returns the direction to which the cursor should move. * * @param event * the keydown event, not null. * @return the direction. May return null if the cursor should not move. */ protected CursorMoveDelta getDeltaFromKeyDownEvent( EditorDomEvent<T> event) { Event e = event.getDomEvent(); if (e.getKeyCode() == KEYCODE_MOVE_VERTICAL) { return e.getShiftKey() ? CursorMoveDelta.UP : CursorMoveDelta.DOWN; } else if (e.getKeyCode() == KEYCODE_MOVE_HORIZONTAL) { // Prevent tab out of Grid Editor event.getDomEvent().preventDefault(); return e.getShiftKey() ? CursorMoveDelta.LEFT : CursorMoveDelta.RIGHT; } return null; }
3.68
graphhopper_TileBasedElevationProvider_setBaseURL
/** * Specifies the service URL from which to download the elevation data. The URL must not be null * or empty. The default is a provider-dependent URL which should work out of the box. */ public TileBasedElevationProvider setBaseURL(String baseUrl) { if (baseUrl == null || baseUrl.isEmpty()) throw new IllegalArgumentException("baseUrl cannot be empty"); this.baseUrl = baseUrl; return this; }
3.68
hadoop_ResourceRequestSet_addAndOverrideRRSet
/** * Merge a requestSet into this one. * * @param requestSet the requestSet to merge * @throws YarnException indicates exceptions from yarn servers. */ public void addAndOverrideRRSet(ResourceRequestSet requestSet) throws YarnException { if (requestSet == null) { return; } for (ResourceRequest rr : requestSet.getRRs()) { addAndOverrideRR(rr); } }
3.68
framework_AbstractComponent_isVisible
/* * (non-Javadoc) * * @see com.vaadin.ui.Component#isVisible() */ @Override public boolean isVisible() { return visible; }
3.68
dubbo_RpcServiceContext_getRequest
/** * Get the request object of the underlying RPC protocol, e.g. HttpServletRequest * * @return null if the underlying protocol doesn't provide support for getting request or the request is not of the specified type */ @Override @SuppressWarnings("unchecked") public <T> T getRequest(Class<T> clazz) { return (request != null && clazz.isAssignableFrom(request.getClass())) ? (T) request : null; }
3.68
flink_TaskSlotTable_freeSlot
/** * Try to free the slot. If the slot is empty, it will set the state of the task slot to free and * return its index. If the slot is not empty, then it will set the state of the task slot to * releasing, fail all tasks and return -1. * * @param allocationId identifying the task slot to be freed * @throws SlotNotFoundException if there is no task slot for the given allocation id * @return Index of the freed slot if the slot could be freed; otherwise -1 */ default int freeSlot(AllocationID allocationId) throws SlotNotFoundException { return freeSlot(allocationId, new Exception("The task slot of this task is being freed.")); }
3.68
hbase_ByteBufferArray_read
/** * Transfers bytes from this buffers array into the given destination {@link ByteBuff} * @param offset start position in this big logical array. * @param dst the destination ByteBuff. Notice that its position will be advanced. * @return number of bytes read */ public int read(long offset, ByteBuff dst) { return internalTransfer(offset, dst, READER); }
3.68
flink_UniqueConstraint_getColumns
/** List of column names for which the primary key was defined. */ public List<String> getColumns() { return columns; }
3.68
pulsar_ConsumerConfiguration_setMessageListener
/** * Sets a {@link MessageListener} for the consumer * <p> * When a {@link MessageListener} is set, application will receive messages through it. Calls to * {@link Consumer#receive()} will not be allowed. * * @param messageListener * the listener object */ public ConsumerConfiguration setMessageListener(MessageListener<byte[]> messageListener) { Objects.requireNonNull(messageListener); this.messageListener = messageListener; conf.setMessageListener(new org.apache.pulsar.shade.client.api.v2.MessageListener<byte[]>() { @Override public void received(org.apache.pulsar.shade.client.api.v2.Consumer<byte[]> consumer, Message<byte[]> msg) { messageListener.received(new ConsumerV1Impl(consumer), msg); } @Override public void reachedEndOfTopic(org.apache.pulsar.shade.client.api.v2.Consumer<byte[]> consumer) { messageListener.reachedEndOfTopic(new ConsumerV1Impl(consumer)); } }); return this; }
3.68
framework_LayoutManager_unregisterDependency
/** * Registers that a ManagedLayout is no longer depending on the size of an * Element. * * @see #registerDependency(ManagedLayout, Element) * * @param owner * the ManagedLayout that no longer depends on the element * @param element * the Element that no longer needs to be measured */ public void unregisterDependency(ManagedLayout owner, Element element) { MeasuredSize measuredSize = getMeasuredSize(element, null); if (measuredSize == null) { return; } measuredSize.removeDependent(owner.getConnectorId()); stopMeasuringIfUnecessary(element); }
3.68
rocketmq-connect_StringConverter_configure
/** * Configure this class. * * @param configs configs in key/value pairs */ @Override public void configure(Map<String, ?> configs) { serializer.configure(configs); deserializer.configure(configs); }
3.68
morf_SqlQueryDataSetProducer_isTableEmpty
/** * @see org.alfasoftware.morf.dataset.DataSetProducer#isTableEmpty(java.lang.String) */ @Override public boolean isTableEmpty(String tableName) { return !records(tableName).iterator().hasNext(); }
3.68
open-banking-gateway_DatasafeConfigurer_provideBouncyCastle
/** * Installs BouncyCastle as required by Datasafe. */ @PostConstruct void provideBouncyCastle() { if (null != Security.getProvider(BouncyCastleProvider.PROVIDER_NAME)) { return; } Security.addProvider(new BouncyCastleProvider()); }
3.68
hbase_ProcedureEvent_wakeIfSuspended
/** * Wakes up the suspended procedures only if the given {@code proc} is waiting on this event. * <p/> * Mainly used by region assignment to reject stale OpenRegionProcedure/CloseRegionProcedure. Use * with caution as it will cause performance issues if there are lots of procedures waiting on the * event. */ public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedureScheduler, Procedure<?> proc) { if (suspendedProcedures.stream().anyMatch(p -> p.getProcId() == proc.getProcId())) { wake(procedureScheduler); return true; } return false; }
3.68
AreaShop_AreaShop_onDisable
/** * Called on shutdown or reload of the server. */ @Override public void onDisable() { Bukkit.getServer().getScheduler().cancelTasks(this); // Cleanup managers for(Manager manager : managers) { manager.shutdown(); } managers = null; fileManager = null; languageManager = null; commandManager = null; signLinkerManager = null; featureManager = null; // Cleanup plugins worldGuard = null; worldGuardInterface = null; worldEdit = null; worldEditInterface = null; // Cleanup other stuff chatprefix = null; debug = false; ready = false; HandlerList.unregisterAll(this); }
3.68
flink_InPlaceMutableHashTable_appendPointerAndCopyRecord
/** * Appends a pointer and a record. The record is read from a DataInputView (this will be the * staging area). * * @param pointer The pointer to write (Note: this is NOT the position to write to!) * @param input The DataInputView to read the record from * @param recordSize The size of the record * @return A pointer to the written data * @throws IOException (EOFException specifically, if memory ran out) */ public long appendPointerAndCopyRecord(long pointer, DataInputView input, int recordSize) throws IOException { setWritePosition(appendPosition); final long oldLastPosition = appendPosition; outView.writeLong(pointer); outView.write(input, recordSize); appendPosition += 8 + recordSize; return oldLastPosition; }
3.68
flink_VertexInputInfoComputationUtils_computeVertexInputInfoForPointwise
/** * Compute the {@link JobVertexInputInfo} for a {@link DistributionPattern#POINTWISE} edge. This * computation algorithm will evenly distribute subpartitions to downstream subtasks according * to the number of subpartitions. Different downstream subtasks consume roughly the same number * of subpartitions. * * @param sourceCount the parallelism of upstream * @param targetCount the parallelism of downstream * @param numOfSubpartitionsRetriever a retriever to get the number of subpartitions * @param isDynamicGraph whether is dynamic graph * @return the computed {@link JobVertexInputInfo} */ static JobVertexInputInfo computeVertexInputInfoForPointwise( int sourceCount, int targetCount, Function<Integer, Integer> numOfSubpartitionsRetriever, boolean isDynamicGraph) { final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>(); if (sourceCount >= targetCount) { for (int index = 0; index < targetCount; index++) { int start = index * sourceCount / targetCount; int end = (index + 1) * sourceCount / targetCount; IndexRange partitionRange = new IndexRange(start, end - 1); IndexRange subpartitionRange = computeConsumedSubpartitionRange( index, 1, () -> numOfSubpartitionsRetriever.apply(start), isDynamicGraph, false); executionVertexInputInfos.add( new ExecutionVertexInputInfo(index, partitionRange, subpartitionRange)); } } else { for (int partitionNum = 0; partitionNum < sourceCount; partitionNum++) { int start = (partitionNum * targetCount + sourceCount - 1) / sourceCount; int end = ((partitionNum + 1) * targetCount + sourceCount - 1) / sourceCount; int numConsumers = end - start; IndexRange partitionRange = new IndexRange(partitionNum, partitionNum); // Variable used in lambda expression should be final or effectively final final int finalPartitionNum = partitionNum; for (int i = start; i < end; i++) { IndexRange subpartitionRange = computeConsumedSubpartitionRange( i, numConsumers, () -> numOfSubpartitionsRetriever.apply(finalPartitionNum), isDynamicGraph, false); executionVertexInputInfos.add( new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange)); } } } return new JobVertexInputInfo(executionVertexInputInfos); }
3.68
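The range arithmetic in the sourceCount >= targetCount branch above is easiest to see with concrete numbers. A self-contained sketch with 5 upstream partitions and 2 downstream subtasks (values chosen arbitrarily):

public class PointwiseRangeDemo {
    public static void main(String[] args) {
        int sourceCount = 5, targetCount = 2;
        for (int index = 0; index < targetCount; index++) {
            // same formulas as in computeVertexInputInfoForPointwise
            int start = index * sourceCount / targetCount;
            int end = (index + 1) * sourceCount / targetCount;
            System.out.println("subtask " + index + " -> partitions [" + start + ", " + (end - 1) + "]");
        }
        // prints:
        // subtask 0 -> partitions [0, 1]
        // subtask 1 -> partitions [2, 4]
    }
}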
dubbo_ProtocolConfig_getDispather
/** * typo, switch to use {@link #getDispatcher()} * * @deprecated {@link #getDispatcher()} */ @Deprecated @Parameter(excluded = true, attribute = false) public String getDispather() { return getDispatcher(); }
3.68
hibernate-validator_ComposingConstraintTree_validateComposingConstraints
/** * Validates all composing constraints recursively. * * @param validationContext Meta data about top level validation * @param valueContext Meta data for currently validated value * @param violatedConstraintValidatorContexts Used to accumulate constraint validator contexts that cause constraint violations * * @return Returns an instance of {@code CompositionResult} relevant for boolean composition of constraints */ private CompositionResult validateComposingConstraints(ValidationContext<?> validationContext, ValueContext<?, ?> valueContext, Collection<ConstraintValidatorContextImpl> violatedConstraintValidatorContexts) { CompositionResult compositionResult = new CompositionResult( true, false ); for ( ConstraintTree<?> tree : children ) { List<ConstraintValidatorContextImpl> tmpConstraintValidatorContexts = new ArrayList<>( 5 ); tree.validateConstraints( validationContext, valueContext, tmpConstraintValidatorContexts ); violatedConstraintValidatorContexts.addAll( tmpConstraintValidatorContexts ); if ( tmpConstraintValidatorContexts.isEmpty() ) { compositionResult.setAtLeastOneTrue( true ); // no need to further validate constraints, because at least one validation passed if ( descriptor.getCompositionType() == OR ) { break; } } else { compositionResult.setAllTrue( false ); if ( descriptor.getCompositionType() == AND && ( validationContext.isFailFastModeEnabled() || descriptor.isReportAsSingleViolation() ) ) { break; } } } return compositionResult; }
3.68
hadoop_ShutdownThreadsHelper_shutdownThread
/** * @param thread the {@link Thread} to be shut down * @param timeoutInMilliSeconds time to wait for the thread to join after being * interrupted * @return <tt>true</tt> if the thread is successfully interrupted, * <tt>false</tt> otherwise */ public static boolean shutdownThread(Thread thread, long timeoutInMilliSeconds) { if (thread == null) { return true; } try { thread.interrupt(); thread.join(timeoutInMilliSeconds); return true; } catch (InterruptedException ie) { LOG.warn("Interrupted while shutting down thread - " + thread.getName()); return false; } }
3.68
flink_Transformation_setDescription
/** Changes the description of this {@code Transformation}. */ public void setDescription(String description) { this.description = Preconditions.checkNotNull(description); }
3.68
flink_ThriftObjectConversions_toFlinkTableKinds
/** Counterpart of the {@code org.apache.hive.service.cli.operation.TableTypeMapping}. */ public static Set<TableKind> toFlinkTableKinds(@Nullable List<String> tableTypes) { Set<TableKind> tableKinds = new HashSet<>(); if (tableTypes == null || tableTypes.isEmpty()) { tableKinds.addAll(Arrays.asList(TableKind.values())); return tableKinds; } for (String tableType : tableTypes) { if (!TABLE_TYPE_MAPPINGS.containsKey(tableType)) { throw new UnsupportedOperationException( String.format( "Can not find the mapping from the TableType '%s' to the Flink TableKind. Please remove it from the specified tableTypes.", tableType)); } tableKinds.add(TABLE_TYPE_MAPPINGS.get(tableType)); } return tableKinds; }
3.68
hudi_BoundedInMemoryQueue_seal
/** * Puts an empty entry to queue to denote termination. */ @Override public void seal() { // done queueing records notifying queue-reader. isWriteDone.set(true); }
3.68
flink_HsSubpartitionConsumer_setMemoryDataView
/** * Sets the {@link HsDataView} for this subpartition. This method is only called when the {@link * HsSubpartitionFileReader} is being created. */ void setMemoryDataView(HsDataView memoryDataView) { synchronized (lock) { checkState( this.memoryDataView == null, "repeatedly set memory data view is not allowed."); this.memoryDataView = memoryDataView; } }
3.68
flink_FlinkContainersSettings_jarPaths
/** * Sets the {@code jarPaths} and returns a reference to this Builder enabling method * chaining. * * @param jarPaths The {@code jarPaths} to set. * @return A reference to this Builder. */ public Builder jarPaths(Collection<String> jarPaths) { this.jarPaths = jarPaths; return this; }
3.68
hadoop_HdfsLocatedFileStatus_getLocatedBlocks
/** * Get block locations for this entity, in HDFS format. * See {@link #makeQualifiedLocated(URI, Path)}. * See {@link DFSUtilClient#locatedBlocks2Locations(LocatedBlocks)}. * @return block locations */ public LocatedBlocks getLocatedBlocks() { return hdfsloc; }
3.68
hbase_BitSetNode_convert
/** * Convert to * org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode * protobuf. */ public ProcedureProtos.ProcedureStoreTracker.TrackerNode convert() { ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder = ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); builder.setStartId(start); for (int i = 0; i < modified.length; ++i) { builder.addUpdated(modified[i]); builder.addDeleted(deleted[i]); } return builder.build(); }
3.68
flink_HiveTableInputFormat_addSchemaToConf
// Hive readers may rely on the schema info in configuration private void addSchemaToConf(JobConf jobConf) { // set columns/types -- including partition cols List<String> typeStrs = Arrays.stream(fieldTypes) .map(t -> HiveTypeUtil.toHiveTypeInfo(t, true).toString()) .collect(Collectors.toList()); jobConf.set(IOConstants.COLUMNS, String.join(",", fieldNames)); jobConf.set(IOConstants.COLUMNS_TYPES, String.join(",", typeStrs)); // set schema evolution -- excluding partition cols int numNonPartCol = fieldNames.length - partitionKeys.size(); jobConf.set( SCHEMA_EVOLUTION_COLUMNS, String.join(",", Arrays.copyOfRange(fieldNames, 0, numNonPartCol))); jobConf.set( SCHEMA_EVOLUTION_COLUMNS_TYPES, String.join(",", typeStrs.subList(0, numNonPartCol))); // in older versions, parquet reader also expects the selected col indices in conf, // excluding part cols String readColIDs = Arrays.stream(selectedFields) .filter(i -> i < numNonPartCol) .mapToObj(String::valueOf) .collect(Collectors.joining(",")); jobConf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, readColIDs); }
3.68
framework_PushRequestHandler_initAtmosphere
/** * Initializes Atmosphere for the given ServletConfiguration * * @since 7.5.0 * @param vaadinServletConfig * The servlet configuration for the servlet which should have * Atmosphere support */ static AtmosphereFramework initAtmosphere( final ServletConfig vaadinServletConfig) { AtmosphereFramework atmosphere = new AtmosphereFramework(false, false) { @Override protected void analytics() { // Overridden to disable version number check } @Override public AtmosphereFramework addInitParameter(String name, String value) { if (vaadinServletConfig.getInitParameter(name) == null) { super.addInitParameter(name, value); } return this; } }; atmosphere.addAtmosphereHandler("/*", new PushAtmosphereHandler()); atmosphere.addInitParameter(ApplicationConfig.BROADCASTER_CACHE, UUIDBroadcasterCache.class.getName()); atmosphere.addInitParameter(ApplicationConfig.ANNOTATION_PROCESSOR, VoidAnnotationProcessor.class.getName()); atmosphere.addInitParameter(ApplicationConfig.PROPERTY_SESSION_SUPPORT, "true"); atmosphere.addInitParameter(ApplicationConfig.MESSAGE_DELIMITER, String.valueOf(PushConstants.MESSAGE_DELIMITER)); atmosphere.addInitParameter( ApplicationConfig.DROP_ACCESS_CONTROL_ALLOW_ORIGIN_HEADER, "false"); final String bufferSize = String .valueOf(PushConstants.WEBSOCKET_BUFFER_SIZE); atmosphere.addInitParameter(ApplicationConfig.WEBSOCKET_BUFFER_SIZE, bufferSize); atmosphere.addInitParameter(ApplicationConfig.WEBSOCKET_MAXTEXTSIZE, bufferSize); atmosphere.addInitParameter(ApplicationConfig.WEBSOCKET_MAXBINARYSIZE, bufferSize); atmosphere.addInitParameter( ApplicationConfig.PROPERTY_ALLOW_SESSION_TIMEOUT_REMOVAL, "false"); // This prevents Atmosphere from recreating a broadcaster after it has // already been destroyed when the servlet is being undeployed // (see #20026) atmosphere.addInitParameter(ApplicationConfig.RECOVER_DEAD_BROADCASTER, "false"); // Disable Atmosphere's message about commercial support atmosphere.addInitParameter("org.atmosphere.cpr.showSupportMessage", "false"); try { atmosphere.init(vaadinServletConfig); // Ensure the client-side knows how to split the message stream // into individual messages when using certain transports AtmosphereInterceptor trackMessageSize = new TrackMessageSizeInterceptor(); trackMessageSize.configure(atmosphere.getAtmosphereConfig()); atmosphere.interceptor(trackMessageSize); } catch (ServletException e) { throw new RuntimeException("Atmosphere init failed", e); } return atmosphere; }
3.68
hbase_JVM_isAarch64
/** * Check if the arch is aarch64; * @return whether this is aarch64 or not. */ public static boolean isAarch64() { return aarch64; }
3.68
flink_GenericDataSinkBase_accept
/** * Accepts the visitor and applies it this instance. This method applies the visitor in a * depth-first traversal. The visitors pre-visit method is called and, if returning * <tt>true</tt>, the visitor is recursively applied on the single input. After the recursion * returned, the post-visit method is called. * * @param visitor The visitor. * @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor) */ @Override public void accept(Visitor<Operator<?>> visitor) { boolean descend = visitor.preVisit(this); if (descend) { this.input.accept(visitor); visitor.postVisit(this); } }
3.68
hbase_ColumnFamilyDescriptorBuilder_setMaxVersions
/** * Set the maximum number of versions to retain. * @param maxVersions maximum number of versions * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { if (maxVersions <= 0) { // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions". // Until there is support, consider 0 or < 0 -- a configuration error. throw new IllegalArgumentException("Maximum versions must be positive"); } if (maxVersions < this.getMinVersions()) { throw new IllegalArgumentException( "Set MaxVersion to " + maxVersions + " while minVersion is " + this.getMinVersions() + ". Maximum versions must be >= minimum versions "); } setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions)); return this; }
3.68
framework_ServerRpcMethodInvocation_doFindInvocationMethod
/** * Tries to find the method from the class by looping through available * methods. * * @param targetType * @param methodName * @param parameterCount * @return */ private Method doFindInvocationMethod(Class<?> targetType, String methodName, int parameterCount) { Method[] methods = targetType.getMethods(); for (Method method : methods) { Class<?>[] parameterTypes = method.getParameterTypes(); if (method.getName().equals(methodName) && parameterTypes.length == parameterCount) { return method; } } return null; }
3.68
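A standalone (non-Vaadin) illustration of the same lookup strategy used above: match only on method name and parameter count, ignoring parameter types:

import java.lang.reflect.Method;

public class MethodLookupDemo {
    static Method find(Class<?> type, String methodName, int parameterCount) {
        for (Method method : type.getMethods()) {
            if (method.getName().equals(methodName)
                    && method.getParameterTypes().length == parameterCount) {
                return method;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        // resolves String.substring(int, int): the only public "substring" overload
        // with exactly two parameters
        System.out.println(find(String.class, "substring", 2));
    }
}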
framework_ComboBoxElement_selectByTextFromPopup
/** * Selects, without filtering, the first option in the ComboBox which * matches the given text. * * @param text * the text of the option to select */ private void selectByTextFromPopup(String text) { // This method assumes there is no need to touch the filter string // 1. Find first page // 2. Select first matching text if found // 3. Iterate towards end while (openPrevPage()) { // Scroll until beginning } do { if (selectSuggestion(text)) { return; } } while (openNextPage()); }
3.68
hudi_OptionsResolver_getIndexType
/** * Returns the index type. */ public static HoodieIndex.IndexType getIndexType(Configuration conf) { return HoodieIndex.IndexType.valueOf(conf.getString(FlinkOptions.INDEX_TYPE)); }
3.68
hadoop_StageConfig_withTaskId
/** * Set builder value. * @param value new value * @return this */ public StageConfig withTaskId(final String value) { checkOpen(); taskId = value; return this; }
3.68
framework_Profiler_enter
/** * Enters a named block. There should always be a matching invocation of * {@link #leave(String)} when leaving the block. Calls to this method will * be removed by the compiler unless profiling is enabled. * * @param name * the name of the entered block */ public static void enter(String name) { if (isEnabled()) { logGwtEvent(name, "begin"); } }
3.68
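A minimal sketch of the enter/leave pairing the contract above requires. The block name is arbitrary, and the com.vaadin.client.Profiler import reflects the usual location of this class (an assumption here):

import com.vaadin.client.Profiler;

public class LayoutTimingExample {
    void measureLayout() {
        Profiler.enter("LayoutTimingExample.measureLayout");
        try {
            // ... the work being profiled ...
        } finally {
            // every enter() needs a matching leave() with the same block name
            Profiler.leave("LayoutTimingExample.measureLayout");
        }
    }
}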
framework_UIConnector_showServerDesign
/** * Sends a request to the server to print a design to the console for the * given component. * * @since 7.5 * @param connector * the component connector to output a declarative design for */ public void showServerDesign(ServerConnector connector) { getRpcProxy(DebugWindowServerRpc.class).showServerDesign(connector); }
3.68
hbase_SnapshotInfo_getMobStoreFilesSize
/** Returns the total size of the store files in the mob store */ public long getMobStoreFilesSize() { return hfilesMobSize.get(); }
3.68
druid_FileNodeListener_refresh
/** * Load the properties file and diff with the stored Properties. * * @return A List of the modification */ @Override public List<NodeEvent> refresh() { Properties originalProperties = PropertiesUtils.loadProperties(file); List<String> nameList = PropertiesUtils.loadNameList(originalProperties, getPrefix()); Properties properties = new Properties(); for (String n : nameList) { String url = originalProperties.getProperty(n + ".url"); String username = originalProperties.getProperty(n + ".username"); String password = originalProperties.getProperty(n + ".password"); if (url == null || url.isEmpty()) { LOG.warn(n + ".url is EMPTY! IGNORE!"); continue; } else { properties.setProperty(n + ".url", url); } if (username == null || username.isEmpty()) { LOG.debug(n + ".username is EMPTY. Maybe you should check the config."); } else { properties.setProperty(n + ".username", username); } if (password == null || password.isEmpty()) { LOG.debug(n + ".password is EMPTY. Maybe you should check the config."); } else { properties.setProperty(n + ".password", password); } } List<NodeEvent> events = NodeEvent.getEventsByDiffProperties(getProperties(), properties); if (events != null && !events.isEmpty()) { LOG.info(events.size() + " different(s) detected."); setProperties(properties); } return events; }
3.68
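A sketch of the property layout the listener above expects: each node name contributes <name>.url, <name>.username and <name>.password entries under the configured prefix. The node names and JDBC URLs are placeholders:

import java.util.Properties;

public class NodePropertiesDemo {
    public static Properties sample() {
        Properties p = new Properties();
        p.setProperty("db1.url", "jdbc:mysql://host1:3306/demo");
        p.setProperty("db1.username", "demo");
        p.setProperty("db1.password", "secret");
        // a node missing its url is skipped by refresh(); username/password are optional
        p.setProperty("db2.url", "jdbc:mysql://host2:3306/demo");
        return p;
    }
}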
hudi_OptionsInference_setupClientId
/** * Utilities that help to auto generate the client id for multi-writer scenarios. * It basically handles two cases: * * <ul> * <li>find the next client id for the new job;</li> * <li>clean the existing inactive client heartbeat files.</li> * </ul> * * @see ClientIds */ public static void setupClientId(Configuration conf) { if (OptionsResolver.isMultiWriter(conf)) { // explicit client id always has higher priority if (!conf.contains(FlinkOptions.WRITE_CLIENT_ID)) { try (ClientIds clientIds = ClientIds.builder().conf(conf).build()) { String clientId = clientIds.nextId(conf); conf.setString(FlinkOptions.WRITE_CLIENT_ID, clientId); } } } }
3.68
flink_DefaultJobLeaderIdService_isStarted
/** * Checks whether the service has been started. * * @return True if the service has been started; otherwise false */ public boolean isStarted() { return jobLeaderIdActions != null; }
3.68
hbase_RegionCoprocessorHost_preReplayWALs
/** * @param info the RegionInfo for this region * @param edits the file of recovered edits */ public void preReplayWALs(final RegionInfo info, final Path edits) throws IOException { execOperation( coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(true) { @Override public void call(RegionObserver observer) throws IOException { observer.preReplayWALs(this, info, edits); } }); }
3.68
flink_KeyGroupRangeAssignment_computeDefaultMaxParallelism
/** * Computes a default maximum parallelism from the operator parallelism. This is used in case * the user has not explicitly configured a maximum parallelism to still allow a certain degree * of scale-up. * * @param operatorParallelism the operator parallelism as basis for computation. * @return the computed default maximum parallelism. */ public static int computeDefaultMaxParallelism(int operatorParallelism) { checkParallelismPreconditions(operatorParallelism); return Math.min( Math.max( MathUtils.roundUpToPowerOfTwo( operatorParallelism + (operatorParallelism / 2)), DEFAULT_LOWER_BOUND_MAX_PARALLELISM), UPPER_BOUND_MAX_PARALLELISM); }
3.68
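The formula above is easiest to follow with numbers: parallelism 100 becomes 150 after the 1.5x headroom, rounds up to the power of two 256, and stays within the bounds. A self-contained sketch using the usual Flink bounds of 128 and 32768, which are assumed here:

public class DefaultMaxParallelismDemo {
    static int roundUpToPowerOfTwo(int x) {
        // smallest power of two >= x, for x >= 2
        return Integer.highestOneBit(x - 1) << 1;
    }

    static int computeDefaultMaxParallelism(int operatorParallelism) {
        return Math.min(
                Math.max(roundUpToPowerOfTwo(operatorParallelism + operatorParallelism / 2), 128),
                32768);
    }

    public static void main(String[] args) {
        System.out.println(computeDefaultMaxParallelism(100)); // 256
        System.out.println(computeDefaultMaxParallelism(4));   // 128 (lower bound)
    }
}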
morf_H2Dialect_dropPrimaryKeyConstraintStatement
/** * @param table The table whose primary key should be dropped * @return The statement */ private String dropPrimaryKeyConstraintStatement(Table table) { return "ALTER TABLE " + schemaNamePrefix() + table.getName() + " DROP PRIMARY KEY"; }
3.68
hbase_HRegionFileSystem_getRegionInfoFileContent
/** Returns Content of the file we write out to the filesystem under a region */ private static byte[] getRegionInfoFileContent(final RegionInfo hri) throws IOException { return RegionInfo.toDelimitedByteArray(hri); }
3.68
querydsl_MathExpressions_tanh
/** * Create a {@code tanh(num)} expression * * <p>Returns the hyperbolic tangent of num radians.</p> * * @param num numeric expression * @return tanh(num) */ public static <A extends Number & Comparable<?>> NumberExpression<Double> tanh(Expression<A> num) { return Expressions.numberOperation(Double.class, Ops.MathOps.TANH, num); }
3.68
hadoop_AbfsRestOperation_execute
/** * Execute an AbfsRestOperation. Track the duration of the request if * abfsCounters isn't null. * @param tracingContext TracingContext instance to track correlation IDs */ public void execute(TracingContext tracingContext) throws AzureBlobFileSystemException { // Since this might be a sub-sequential or parallel rest operation // triggered by a single file system call, using a new tracing context. lastUsedTracingContext = createNewTracingContext(tracingContext); try { IOStatisticsBinding.trackDurationOfInvocation(abfsCounters, AbfsStatistic.getStatNameFromHttpCall(method), () -> completeExecute(lastUsedTracingContext)); } catch (AzureBlobFileSystemException aze) { throw aze; } catch (IOException e) { throw new UncheckedIOException("Error while tracking Duration of an " + "AbfsRestOperation call", e); } }
3.68
flink_RpcSystem_close
/** Hook to cleanup resources, like common thread pools or classloaders. */ @Override default void close() {}
3.68
hadoop_ProducerConsumer_blockingTake
/** * Blocking take from ProducerConsumer output queue (catches exceptions and * retries forever). * * @return item returned by processor's processItem(). */ public WorkReport<R> blockingTake() { while (true) { try { WorkReport<R> report = outputQueue.take(); workCnt.decrementAndGet(); return report; } catch (InterruptedException ie) { LOG.debug("Retrying in blockingTake..."); } } }
3.68
AreaShop_AddedFriendEvent_getFriend
/** * Get the OfflinePlayer that is getting added as friend. * @return The friend that is getting added */ public OfflinePlayer getFriend() { return friend; }
3.68
open-banking-gateway_ConsentAuthorizationEncryptionServiceProvider_forSecretKey
/** * Create encryption service for a given secret key. * @param key Secret key to encrypt/decrypt data with. * @return Symmetric encryption service. */ public EncryptionService forSecretKey(SecretKeyWithIv key) { String keyId = Hashing.sha256().hashBytes(key.getSecretKey().getEncoded()).toString(); return oper.encryptionService(keyId, key); }
3.68
framework_DefaultFieldFactory_createFieldByPropertyType
/** * Creates fields based on the property type. * <p> * The default field type is {@link TextField}. Other field types generated * by this method: * <p> * <b>Boolean</b>: {@link CheckBox}.<br/> * <b>Date</b>: {@link DateField}(resolution: day).<br/> * <b>Item</b>: {@link Form}. <br/> * <b>default field type</b>: {@link TextField}. * <p> * * @param type * the type of the property * @return the most suitable generic {@link LegacyField} for given type */ public static Field<?> createFieldByPropertyType(Class<?> type) { // Null typed properties can not be edited if (type == null) { return null; } // Item field if (Item.class.isAssignableFrom(type)) { return new Form(); } // Date field if (Date.class.isAssignableFrom(type)) { final DateField df = new DateField(); df.setResolution(DateField.RESOLUTION_DAY); return df; } // Boolean field if (Boolean.class.isAssignableFrom(type)) { return new CheckBox(); } return new TextField(); }
3.68
pulsar_ResourceQuota_add
/** * Add quota. * * @param quota * <code>ResourceQuota</code> to add */ public void add(ResourceQuota quota) { this.msgRateIn += quota.msgRateIn; this.msgRateOut += quota.msgRateOut; this.bandwidthIn += quota.bandwidthIn; this.bandwidthOut += quota.bandwidthOut; this.memory += quota.memory; }
3.68
flink_MailboxProcessor_drain
/** * Finishes running all mails in the mailbox. If no concurrent write operations occurred, the * mailbox must be empty after this method. */ public void drain() throws Exception { for (final Mail mail : mailbox.drain()) { runMail(mail); } }
3.68
flink_ContextResolvedTable_generateAnonymousStringIdentifier
/** * This method tries to return the connector name of the table, trying to provide a bit more * helpful toString for anonymous tables. It's only to help users to debug, and its return value * should not be relied on. */ private static String generateAnonymousStringIdentifier( @Nullable String hint, ResolvedCatalogBaseTable<?> resolvedTable) { // Planner can do some fancy optimizations' logic squashing two sources together in the same // operator. Because this logic is string based, anonymous tables still need some kind of // unique string based identifier that can be used later by the planner. if (hint == null) { try { hint = resolvedTable.getOptions().get(FactoryUtil.CONNECTOR.key()); } catch (Exception ignored) { } } int id = uniqueId.incrementAndGet(); if (hint == null) { return "*anonymous$" + id + "*"; } return "*anonymous_" + hint + "$" + id + "*"; }
3.68
hudi_BaseHoodieTableServiceClient_completeCompaction
/** * Commit Compaction and track metrics. */ protected void completeCompaction(HoodieCommitMetadata metadata, HoodieTable table, String compactionCommitTime) { this.context.setJobStatus(this.getClass().getSimpleName(), "Collect compaction write status and commit compaction: " + config.getTableName()); List<HoodieWriteStat> writeStats = metadata.getWriteStats(); handleWriteErrors(writeStats, TableServiceType.COMPACT); final HoodieInstant compactionInstant = HoodieTimeline.getCompactionInflightInstant(compactionCommitTime); try { this.txnManager.beginTransaction(Option.of(compactionInstant), Option.empty()); finalizeWrite(table, compactionCommitTime, writeStats); // commit to data table after committing to metadata table. writeTableMetadata(table, compactionCommitTime, metadata, context.emptyHoodieData()); LOG.info("Committing Compaction " + compactionCommitTime + ". Finished with result " + metadata); CompactHelpers.getInstance().completeInflightCompaction(table, compactionCommitTime, metadata); } finally { this.txnManager.endTransaction(Option.of(compactionInstant)); } WriteMarkersFactory.get(config.getMarkersType(), table, compactionCommitTime) .quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism()); if (compactionTimer != null) { long durationInMs = metrics.getDurationInMs(compactionTimer.stop()); HoodieActiveTimeline.parseDateFromInstantTimeSafely(compactionCommitTime).ifPresent(parsedInstant -> metrics.updateCommitMetrics(parsedInstant.getTime(), durationInMs, metadata, COMPACTION_ACTION) ); } LOG.info("Compacted successfully on commit " + compactionCommitTime); }
3.68
druid_DruidDataSource_initFromSPIServiceLoader
/** * load filters from SPI ServiceLoader * * @see ServiceLoader */ private void initFromSPIServiceLoader() { if (loadSpifilterSkip) { return; } if (autoFilters == null) { List<Filter> filters = new ArrayList<Filter>(); ServiceLoader<Filter> autoFilterLoader = ServiceLoader.load(Filter.class); for (Filter filter : autoFilterLoader) { AutoLoad autoLoad = filter.getClass().getAnnotation(AutoLoad.class); if (autoLoad != null && autoLoad.value()) { filters.add(filter); } } autoFilters = filters; } for (Filter filter : autoFilters) { if (LOG.isInfoEnabled()) { LOG.info("load filter from spi :" + filter.getClass().getName()); } addFilter(filter); } }
3.68
flink_Conditions_fulfill
/** Generic condition to check fulfillment of a predicate. */ public static <T extends HasName> ArchCondition<T> fulfill(DescribedPredicate<T> predicate) { return new ArchCondition<T>(predicate.getDescription()) { @Override public void check(T item, ConditionEvents events) { if (!predicate.test(item)) { final String message = String.format( "%s does not satisfy: %s", item.getName(), predicate.getDescription()); events.add(SimpleConditionEvent.violated(item, message)); } } }; }
3.68
rocketmq-connect_WorkerTask_transitionTo
/** * change task target state * * @param state */ public void transitionTo(TargetState state) { synchronized (this) { // ignore the state change if we are stopping if (isStopping()) { return; } // not equal set if (this.targetState != state) { this.targetState = state; // notify thread continue run this.notifyAll(); } } }
3.68
flink_BytesMap_lookup
/** * @param key by which looking up the value in the hash map. Only support the key in the * BinaryRowData form who has only one MemorySegment. * @return {@link LookupInfo} */ public LookupInfo<K, V> lookup(K key) { final int hashCode1 = key.hashCode(); int newPos = hashCode1 & numBucketsMask; // which segment contains the bucket int bucketSegmentIndex = newPos >>> numBucketsPerSegmentBits; // offset of the bucket in the segment int bucketOffset = (newPos & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS; boolean found = false; int step = STEP_INCREMENT; int hashCode2 = 0; int findElementPtr; try { do { findElementPtr = bucketSegments.get(bucketSegmentIndex).getInt(bucketOffset); if (findElementPtr == END_OF_LIST) { // This is a new key. break; } else { final int storedHashCode = bucketSegments .get(bucketSegmentIndex) .getInt(bucketOffset + ELEMENT_POINT_LENGTH); if (hashCode1 == storedHashCode) { recordArea.setReadPosition(findElementPtr); if (recordArea.readKeyAndEquals(key)) { // we found an element with a matching key, and not just a hash // collision found = true; reusedValue = recordArea.readValue(reusedValue); break; } } } if (step == 1) { hashCode2 = calcSecondHashCode(hashCode1); } newPos = (hashCode1 + step * hashCode2) & numBucketsMask; // which segment contains the bucket bucketSegmentIndex = newPos >>> numBucketsPerSegmentBits; // offset of the bucket in the segment bucketOffset = (newPos & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS; step += STEP_INCREMENT; } while (true); } catch (IOException ex) { throw new RuntimeException( "Error reading record from the aggregate map: " + ex.getMessage(), ex); } reuseLookupInfo.set(found, hashCode1, key, reusedValue, bucketSegmentIndex, bucketOffset); return reuseLookupInfo; }
3.68
pulsar_SaslRoleTokenSigner_verifyAndExtract
/** * Verifies a signed string and extracts the original string. * * @param signedStr the signed string to verify and extract. * * @return the extracted original string. * * @throws AuthenticationException thrown if the given string is not a signed string or if the signature is invalid. */ public String verifyAndExtract(String signedStr) throws AuthenticationException { int index = signedStr.lastIndexOf(SIGNATURE); if (index == -1) { throw new AuthenticationException("Invalid signed text: " + signedStr); } String originalSignature = signedStr.substring(index + SIGNATURE.length()); String rawValue = signedStr.substring(0, index); String currentSignature = computeSignature(rawValue); if (!MessageDigest.isEqual(originalSignature.getBytes(), currentSignature.getBytes())){ throw new AuthenticationException("Invalid signature"); } return rawValue; }
3.68
hudi_HoodieTable_getBaseFileOnlyView
/** * Get the base file only view of the file system for this table. */ public BaseFileOnlyView getBaseFileOnlyView() { return getViewManager().getFileSystemView(metaClient); }
3.68