Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, 3.26 to 3.68)
hbase_ZKUtil_partitionOps
/** * Partition the list of {@code ops} by size (using {@link #estimateSize(ZKUtilOp)}). */ static List<List<ZKUtilOp>> partitionOps(List<ZKUtilOp> ops, int maxPartitionSize) { List<List<ZKUtilOp>> partitionedOps = new ArrayList<>(); List<ZKUtilOp> currentPartition = new ArrayList<>(); int currentPartitionSize = 0; partitionedOps.add(currentPartition); Iterator<ZKUtilOp> iter = ops.iterator(); while (iter.hasNext()) { ZKUtilOp currentOp = iter.next(); int currentOpSize = estimateSize(currentOp); // Roll a new partition if necessary // If the current partition is empty, put the element in there anyways. // We can roll a new partition if we get another element if (!currentPartition.isEmpty() && currentOpSize + currentPartitionSize > maxPartitionSize) { currentPartition = new ArrayList<>(); partitionedOps.add(currentPartition); currentPartitionSize = 0; } // Add the current op to the partition currentPartition.add(currentOp); // And record its size currentPartitionSize += currentOpSize; } return partitionedOps; }
3.68
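To make the partitioning logic above concrete, here is a minimal standalone sketch of the same greedy size-based packing, using strings and their lengths in place of ZKUtilOp and estimateSize; the class and method names below are illustrative only and are not part of the HBase API.

    import java.util.ArrayList;
    import java.util.List;

    public class GreedyPartitionDemo {
        // Greedily packs items into partitions whose accumulated size stays within maxPartitionSize,
        // mirroring partitionOps: an oversized item still gets its own (single-element) partition.
        static List<List<String>> partition(List<String> items, int maxPartitionSize) {
            List<List<String>> partitions = new ArrayList<>();
            List<String> current = new ArrayList<>();
            int currentSize = 0;
            partitions.add(current);
            for (String item : items) {
                int itemSize = item.length(); // stand-in for estimateSize(op)
                // Roll a new partition when the current one is non-empty and would overflow
                if (!current.isEmpty() && currentSize + itemSize > maxPartitionSize) {
                    current = new ArrayList<>();
                    partitions.add(current);
                    currentSize = 0;
                }
                current.add(item);
                currentSize += itemSize;
            }
            return partitions;
        }

        public static void main(String[] args) {
            // prints [[aaaa, bb], [cccccc], [d]] for a size budget of 6
            System.out.println(partition(List.of("aaaa", "bb", "cccccc", "d"), 6));
        }
    }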
hbase_BulkLoadHFilesTool_getUniqueName
// unique file name for the table private String getUniqueName() { return UUID.randomUUID().toString().replaceAll("-", ""); }
3.68
morf_ColumnTypeBean_getType
/** * @return the type */ @Override public DataType getType() { return type; }
3.68
flink_CommittableCollector_addMessage
/** * Adds a {@link CommittableMessage} to the collector to hold it until emission. * * @param message either {@link CommittableSummary} or {@link CommittableWithLineage} */ public void addMessage(CommittableMessage<CommT> message) { if (message instanceof CommittableSummary) { addSummary((CommittableSummary<CommT>) message); } else if (message instanceof CommittableWithLineage) { addCommittable((CommittableWithLineage<CommT>) message); } }
3.68
hadoop_RegistryTypeUtils_hostnamePortPair
/** * Create a (hostname, port) address pair * @param address socket address whose hostname and port are used for the * generated address. * @return a 1 entry map. */ public static Map<String, String> hostnamePortPair(InetSocketAddress address) { return hostnamePortPair(address.getHostName(), address.getPort()); }
3.68
flink_ExistingSavepoint_window
/** * Read window state from an operator in a {@code Savepoint}. This method supports reading from * any type of window. * * @param windowSerializer The serializer used for the window type. * @return A {@link WindowReader}. */ public <W extends Window> WindowReader<W> window(TypeSerializer<W> windowSerializer) { Preconditions.checkNotNull(windowSerializer, "The window serializer must not be null"); return new WindowReader<>(env, metadata, stateBackend, windowSerializer); }
3.68
flink_SingletonResultIterator_set
/** * Sets the single record to be returned by this iterator. The offset and records-to-skip count * will be used as provided here for the returned {@link RecordAndPosition}, meaning they need * to point to AFTER this specific record (because a checkpoint taken after the record was * processed needs to resume from after this record). */ public void set(final E element, final long offset, final long skipCount) { this.recordAndPosition.set(element, offset, skipCount); this.element = this.recordAndPosition; }
3.68
hbase_HRegionFileSystem_commitMergedRegion
/** * Commit a merged region, making it ready for use. */ public void commitMergedRegion(List<Path> allMergedFiles, MasterProcedureEnv env) throws IOException { Path regionDir = getMergesDir(regionInfoForFs); if (regionDir != null && fs.exists(regionDir)) { // Write HRI to a file in case we need to recover hbase:meta Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE); byte[] regionInfoContent = getRegionInfoFileContent(regionInfo); writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent); insertRegionFilesIntoStoreTracker(allMergedFiles, env, this); } }
3.68
hmily_HmilyRepositoryNode_getRootPathPrefix
/** * Get root path prefix. * * @return root path prefix */ public String getRootPathPrefix() { return ROOT_PATH_PREFIX; }
3.68
hbase_RequestConverter_buildRegionOpenInfo
/** * Create a RegionOpenInfo based on given region info and version of offline node */ public static RegionOpenInfo buildRegionOpenInfo(RegionInfo region, List<ServerName> favoredNodes, long openProcId) { RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder(); builder.setRegion(ProtobufUtil.toRegionInfo(region)); if (favoredNodes != null) { for (ServerName server : favoredNodes) { builder.addFavoredNodes(ProtobufUtil.toServerName(server)); } } builder.setOpenProcId(openProcId); return builder.build(); }
3.68
hudi_HoodieInternalConfig_getBulkInsertIsPartitionRecordsSorted
/** * Returns whether partition records are sorted. * * @param propertyValue value for property BULKINSERT_ARE_PARTITIONER_RECORDS_SORTED. * @return the parsed property value, or the default if the property is not set. */ public static Boolean getBulkInsertIsPartitionRecordsSorted(String propertyValue) { return propertyValue != null ? Boolean.parseBoolean(propertyValue) : DEFAULT_BULKINSERT_ARE_PARTITIONER_RECORDS_SORTED; }
3.68
framework_VaadinSession_readObject
/** * Override default deserialization logic to account for transient * {@link #pendingAccessQueue}. */ private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { Map<Class<?>, CurrentInstance> old = CurrentInstance.setCurrent(this); try { stream.defaultReadObject(); pendingAccessQueue = new ConcurrentLinkedQueue<>(); } finally { CurrentInstance.restoreInstances(old); } }
3.68
morf_ChangeColumn_applyChange
/** * Changes a column definition from the start point to the end point. * * @param schema {@link Schema} to apply the change against resulting in new metadata. * @param columnStartPoint the start definition for the column * @param columnEndPoint the end definition for the column * @return MetaData with {@link SchemaChange} applied. */ private Schema applyChange(Schema schema, Column columnStartPoint, Column columnEndPoint) { // Now setup the new table definition Table original = schema.getTable(tableName); boolean foundMatch = false; // Copy the columns names into a list of strings for column sort order List<String> columns = new ArrayList<>(); Set<String> processedColumns = new HashSet<>(); for (Column column : original.columns()) { String currentColumnName = column.getName(); // If we're looking at the column being changed... if (column.getUpperCaseName().equalsIgnoreCase(columnStartPoint.getUpperCaseName())) { // check the current column matches the specified source column CollectingDifferenceWriter differences = new SchemaHomology.CollectingDifferenceWriter(); if (!new SchemaHomology(differences, "trial schema", "source column").columnsMatch(columnStartPoint, column)) { throw new IllegalArgumentException("Cannot change column [" + currentColumnName + "] on table [" + tableName + "], the 'from' column definition does not match: " + differences.differences()); } // Substitute in the new column currentColumnName = columnEndPoint.getName(); foundMatch = true; } if (!processedColumns.add(column.getUpperCaseName())) { throw new IllegalArgumentException(String.format("Cannot change column name from [%s] to [%s] on table [%s] as column with that name already exists", columnStartPoint.getName(), columnEndPoint.getName(), tableName)); } columns.add(currentColumnName); } if (!foundMatch) { throw new IllegalArgumentException(String.format("Cannot change column [%s] as it does not exist on table [%s]", columnStartPoint.getName(), tableName)); } // If the column is being renamed, check it isn't contained in an index if (!columnStartPoint.getUpperCaseName().equals(columnEndPoint.getUpperCaseName())) { for (Index index : original.indexes()) { for (String indexedColumnName : index.columnNames()) { if (indexedColumnName.equalsIgnoreCase(columnStartPoint.getName())) { throw new IllegalArgumentException( String.format("Cannot rename column [%s] as it exists on index [%s] on table [%s]", columnStartPoint.getName(), index.getName(), tableName)); } } } } return new TableOverrideSchema(schema, new AlteredTable(original, columns, Arrays.asList(new Column[] {columnEndPoint}))); }
3.68
hudi_BloomFilterFactory_createBloomFilter
/** * Creates a new {@link BloomFilter} with the given args. * * @param numEntries total number of entries * @param errorRate max allowed error rate * @param maxNumberOfEntries maximum number of entries, used to bound the dynamic bloom filter * @param bloomFilterTypeCode bloom filter type code * @return the {@link BloomFilter} thus created */ public static BloomFilter createBloomFilter(int numEntries, double errorRate, int maxNumberOfEntries, String bloomFilterTypeCode) { if (bloomFilterTypeCode.equalsIgnoreCase(BloomFilterTypeCode.SIMPLE.name())) { return new SimpleBloomFilter(numEntries, errorRate, Hash.MURMUR_HASH); } else if (bloomFilterTypeCode.equalsIgnoreCase(BloomFilterTypeCode.DYNAMIC_V0.name())) { return new HoodieDynamicBoundedBloomFilter(numEntries, errorRate, Hash.MURMUR_HASH, maxNumberOfEntries); } else { throw new IllegalArgumentException("Bloom Filter type code not recognizable " + bloomFilterTypeCode); } }
3.68
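A hedged usage sketch of the factory above: the add and mightContain calls follow Hudi's common BloomFilter interface, and the concrete numbers are placeholders.

    // Usage sketch; add(String) and mightContain(String) are from Hudi's BloomFilter interface.
    BloomFilter filter = BloomFilterFactory.createBloomFilter(
        60000,                               // numEntries: expected number of keys
        0.000000001,                         // errorRate: target false-positive rate
        100000,                              // maxNumberOfEntries: only relevant for DYNAMIC_V0
        BloomFilterTypeCode.SIMPLE.name());
    filter.add("record-key-1");
    boolean maybePresent = filter.mightContain("record-key-1");   // true
    boolean probablyAbsent = filter.mightContain("record-key-2"); // false, barring a false positive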
flink_Pattern_optional
/** * Specifies that this pattern is optional for a final match of the pattern sequence to happen. * * @return The same pattern as optional. * @throws MalformedPatternException if the quantifier is not applicable to this pattern. */ public Pattern<T, F> optional() { checkIfPreviousPatternGreedy(); quantifier.optional(); return this; }
3.68
hadoop_AllocateRequest_getSchedulingRequests
/** * Get the list of Scheduling requests being sent by the * <code>ApplicationMaster</code>. * @return list of {@link SchedulingRequest} being sent by the * <code>ApplicationMaster</code>. */ @Public @Unstable public List<SchedulingRequest> getSchedulingRequests() { return Collections.emptyList(); }
3.68
hadoop_InstantiationIOException_unsupportedConstructor
/** * Failure to find a valid constructor (signature, visibility) or * factory method. * @param uri URI of filesystem * @param classname classname. * @param key configuration key * @return an exception. */ public static InstantiationIOException unsupportedConstructor( @Nullable URI uri, String classname, String key) { return new InstantiationIOException(Kind.UnsupportedConstructor, uri, classname, key, CONSTRUCTOR_EXCEPTION, null); }
3.68
flink_InternalWindowProcessFunction_isCleanupTime
/** Returns {@code true} if the given time is the cleanup time for the given window. */ protected final boolean isCleanupTime(W window, long time) { return time == toEpochMillsForTimer(cleanupTime(window), ctx.getShiftTimeZone()); }
3.68
hadoop_SchedulerHealth_getAggregatePreemptionCount
/** * Get the aggregate of all preemption counts. * * @return aggregate preemption count */ public Long getAggregatePreemptionCount() { return getAggregateOperationCount(Operation.PREEMPTION); }
3.68
flink_Tuple15_equals
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple15)) { return false; } @SuppressWarnings("rawtypes") Tuple15 tuple = (Tuple15) o; if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false; } if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; } if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) { return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) { return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) { return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false; } if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) { return false; } if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) { return false; } if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) { return false; } if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) { return false; } if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) { return false; } if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) { return false; } if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) { return false; } return true; }
3.68
AreaShop_FileManager_getGroups
/** * Get all groups. * @return Collection with all groups (safe to modify) */ public Collection<RegionGroup> getGroups() { return groups.values(); }
3.68
framework_VCalendarPanel_focusPreviousYear
/** * Moves the focused date back by the given number of years. */ private void focusPreviousYear(int years) { if (focusedDate == null) { return; } Date previousYearDate = (Date) focusedDate.clone(); previousYearDate.setYear(previousYearDate.getYear() - years); // Do not focus if not inside range if (!isDateInsideRange(previousYearDate, Resolution.YEAR)) { return; } // If we remove one year, but have to roll back a bit, fit it // into the calendar. Also the months have to be changed if (!isDateInsideRange(previousYearDate, Resolution.DAY)) { previousYearDate = adjustDateToFitInsideRange(previousYearDate); focusedDate.setYear(previousYearDate.getYear()); focusedDate.setMonth(previousYearDate.getMonth()); focusedDate.setDate(previousYearDate.getDate()); displayedMonth.setYear(previousYearDate.getYear()); displayedMonth.setMonth(previousYearDate.getMonth()); } else { int currentMonth = focusedDate.getMonth(); focusedDate.setYear(focusedDate.getYear() - years); displayedMonth.setYear(displayedMonth.getYear() - years); /* * If the focused date was a leap day (Feb 29), the new date becomes * Mar 1 if the new year is not also a leap year. Set it to Feb 28 * instead. */ if (focusedDate.getMonth() != currentMonth) { focusedDate.setDate(0); } } renderCalendar(); }
3.68
hudi_HoodieMetaSyncOperations_getStorageFieldSchemas
/** * Get the list of field schema from the Hudi table on storage. */ default List<FieldSchema> getStorageFieldSchemas() { return Collections.emptyList(); }
3.68
framework_MonthEventLabel_setHTML
/* * (non-Javadoc) * * @see com.google.gwt.user.client.ui.HTML#setHTML(java.lang.String) */ @Override public void setHTML(String html) { throw new UnsupportedOperationException( "Use setCaption() and setTime() instead"); }
3.68
hbase_RandomRowFilter_getChance
/** Returns The chance that a row gets included. */ public float getChance() { return chance; }
3.68
hbase_Mutation_setTTL
/** * Set the TTL desired for the result of the mutation, in milliseconds. * @param ttl the TTL desired for the result of the mutation, in milliseconds */ public Mutation setTTL(long ttl) { setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl)); return this; }
3.68
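To illustrate the TTL attribute set by the method above, a small sketch of applying a one-hour TTL to a Put before writing it; 'table' is assumed to be an open org.apache.hadoop.hbase.client.Table, and the row, family and qualifier are placeholders.

    // Placeholder row, family and qualifier; the usual org.apache.hadoop.hbase.client imports are assumed.
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    put.setTTL(TimeUnit.HOURS.toMillis(1)); // the written cell expires roughly one hour after the mutation
    table.put(put);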
framework_AbstractComponent_setDescription
/** * Sets the component's description using given content {@code mode}. See * {@link #getDescription()} for more information on what the description * is. * <p> * If the content {@code mode} is {@literal ContentMode.HTML} the * description is displayed as HTML in tooltips or directly in certain * components so care should be taken to avoid creating the possibility for * HTML injection and possibly XSS vulnerabilities. * * @param description * the new description string for the component. * @param mode * the content mode for the description * @since 8.0 */ public void setDescription(String description, ContentMode mode) { getState().description = description; getState().descriptionContentMode = mode; }
3.68
hbase_RpcExecutor_startHandlers
/** * Start up our handlers. */ protected void startHandlers(final String nameSuffix, final int numHandlers, final List<BlockingQueue<CallRunner>> callQueues, final int qindex, final int qsize, final int port, final AtomicInteger activeHandlerCount) { final String threadPrefix = name + Strings.nullToEmpty(nameSuffix); double handlerFailureThreshhold = conf == null ? 1.0 : conf.getDouble(HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); for (int i = 0; i < numHandlers; i++) { final int index = qindex + (i % qsize); String name = "RpcServer." + threadPrefix + ".handler=" + handlers.size() + ",queue=" + index + ",port=" + port; RpcHandler handler = getHandler(name, handlerFailureThreshhold, handlerCount, callQueues.get(index), activeHandlerCount, failedHandlerCount, abortable); handler.start(); handlers.add(handler); } LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}", handlers.size(), threadPrefix, qsize, port); }
3.68
framework_RowVisibilityChangeEvent_getAssociatedType
/* * (non-Javadoc) * * @see com.google.gwt.event.shared.GwtEvent#getAssociatedType() */ @Override public Type<RowVisibilityChangeHandler> getAssociatedType() { return TYPE; }
3.68
hadoop_Sets_newTreeSet
/** * Creates a <i>mutable</i> {@code TreeSet} instance containing the given * elements sorted by their natural ordering. * * <p><b>Note:</b> if mutability is not required, use * ImmutableSortedSet#copyOf(Iterable) instead. * * <p><b>Note:</b> If {@code elements} is a {@code SortedSet} with an * explicit comparator, this method has different behavior than * {@link TreeSet#TreeSet(SortedSet)}, which returns a {@code TreeSet} * with that comparator. * * <p><b>Note for Java 7 and later:</b> this method is now unnecessary and * should be treated as deprecated. Instead, use the {@code TreeSet} * constructor directly, taking advantage of the new * <a href="http://goo.gl/iz2Wi">"diamond" syntax</a>. * * <p>This method is just a small convenience for creating an empty set and * then calling Iterables#addAll. This method is not very useful and will * likely be deprecated in the future. * * @param <E> Generics Type E. * @param elements the elements that the set should contain * @return a new {@code TreeSet} containing those elements (minus duplicates) */ public static <E extends Comparable> TreeSet<E> newTreeSet( Iterable<? extends E> elements) { TreeSet<E> set = newTreeSet(); addAll(set, elements); return set; }
3.68
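A quick usage sketch of the helper above, assuming only JDK collections alongside Hadoop's Sets utility.

    // newTreeSet copies the elements into a mutable TreeSet sorted by natural ordering.
    TreeSet<String> sorted = Sets.newTreeSet(Arrays.asList("banana", "apple", "cherry", "apple"));
    System.out.println(sorted); // [apple, banana, cherry]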
framework_TooltipInfo_setTitle
/** * Sets the tooltip title. * * @param title * the title to set */ public void setTitle(String title) { this.title = title; }
3.68
pulsar_PulsarAdminImpl_worker
/** * @return the Worker stats */ public Worker worker() { return worker; }
3.68
flink_StreamingFileSink_forRowFormat
/** * Creates the builder for a {@link StreamingFileSink} with row-encoding format. * * @param basePath the base path where all the buckets are going to be created as * sub-directories. * @param encoder the {@link Encoder} to be used when writing elements in the buckets. * @param <IN> the type of incoming elements * @return The builder where the remaining of the configuration parameters for the sink can be * configured. In order to instantiate the sink, call {@link RowFormatBuilder#build()} after * specifying the desired parameters. */ public static <IN> StreamingFileSink.DefaultRowFormatBuilder<IN> forRowFormat( final Path basePath, final Encoder<IN> encoder) { return new DefaultRowFormatBuilder<>(basePath, encoder, new DateTimeBucketAssigner<>()); }
3.68
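A typical way to wire the row-format builder above into a pipeline; SimpleStringEncoder is Flink's built-in string encoder, and the output path is a placeholder.

    // The output path is a placeholder; 'env' is a StreamExecutionEnvironment.
    DataStream<String> stream = env.fromElements("a", "b", "c");
    StreamingFileSink<String> sink = StreamingFileSink
        .forRowFormat(new Path("file:///tmp/row-format-out"), new SimpleStringEncoder<String>("UTF-8"))
        .build();
    stream.addSink(sink);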
hbase_MultiVersionConcurrencyControl_waitForRead
/** * Wait for the global readPoint to advance up to the passed in write entry number. */ void waitForRead(WriteEntry e) { boolean interrupted = false; int count = 0; synchronized (readWaiters) { while (readPoint.get() < e.getWriteNumber()) { if (count % 100 == 0 && count > 0) { long totalWaitTillNow = READPOINT_ADVANCE_WAIT_TIME * count; LOG.warn("STUCK for : " + totalWaitTillNow + " millis. " + this); } count++; try { readWaiters.wait(READPOINT_ADVANCE_WAIT_TIME); } catch (InterruptedException ie) { // We were interrupted... finish the loop -- i.e. cleanup --and then // on our way out, reset the interrupt flag. interrupted = true; } } } if (interrupted) { Thread.currentThread().interrupt(); } }
3.68
framework_VCalendarPanel_onMouseDown
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.MouseDownHandler#onMouseDown(com.google * .gwt.event.dom.client.MouseDownEvent) */ @Override public void onMouseDown(MouseDownEvent event) { // Click-n-hold the left mouse button for fast-forward or fast-rewind. // Timer is first used for a 500ms delay after mousedown. After that has // elapsed, another timer is triggered to go off every 150ms. Both // timers are cancelled on mouseup or mouseout. if (event.getNativeButton() == NativeEvent.BUTTON_LEFT && event.getSource() instanceof VEventButton) { final VEventButton sender = (VEventButton) event.getSource(); processClickEvent(sender); mouseTimer = new Timer() { @Override public void run() { mouseTimer = new Timer() { @Override public void run() { processClickEvent(sender); } }; mouseTimer.scheduleRepeating(150); } }; mouseTimer.schedule(500); } }
3.68
pulsar_BlobStoreManagedLedgerOffloader_getBlobStoreLocation
/** * Attempts to create a BlobStoreLocation from the values in the offloadDriverMetadata, * however, if no values are available, it defaults to the currently configured * provider, region, bucket, etc. * * @param offloadDriverMetadata * @return */ private BlobStoreLocation getBlobStoreLocation(Map<String, String> offloadDriverMetadata) { return (!offloadDriverMetadata.isEmpty()) ? new BlobStoreLocation(offloadDriverMetadata) : new BlobStoreLocation(getOffloadDriverMetadata()); }
3.68
framework_CurrentInstance_clearAll
/** * Clears all current instances. */ public static void clearAll() { INSTANCES.remove(); }
3.68
framework_ClassResource_getMIMEType
/** * Gets the MIME type of this resource. * * @see com.vaadin.server.Resource#getMIMEType() */ @Override public String getMIMEType() { return FileTypeResolver.getMIMEType(resourceName); }
3.68
dubbo_RestRPCInvocationUtil_getInvoker
/** * get invoker by path matcher * * @param pathMatcher * @return */ public static Invoker getInvoker(PathMatcher pathMatcher, ServiceDeployer serviceDeployer) { InvokerAndRestMethodMetadataPair pair = getRestMethodMetadataAndInvokerPair(pathMatcher, serviceDeployer); if (pair == null) { return null; } return pair.getInvoker(); }
3.68
hbase_KeyValue_heapSize
/** * HeapSize implementation * <p/> * We do not count the bytes in the rowCache because it should be empty for a KeyValue in the * MemStore. */ @Override public long heapSize() { // Deep object overhead for this KV consists of two parts. The first part is the KV object // itself, while the second part is the backing byte[]. We will only count the array overhead // from the byte[] only if this is the first KV in there. int fixed = ClassSize.align(FIXED_OVERHEAD); if (offset == 0) { // count both length and object overhead return fixed + ClassSize.sizeOfByteArray(length); } else { // only count the number of bytes return (long) fixed + length; } }
3.68
hudi_TerminationStrategyUtils_createPostWriteTerminationStrategy
/** * Create a PostWriteTerminationStrategy class via reflection, * <br> * if the class name of PostWriteTerminationStrategy is configured through the {@link HoodieStreamer.Config#postWriteTerminationStrategyClass}. */ public static Option<PostWriteTerminationStrategy> createPostWriteTerminationStrategy(TypedProperties properties, String postWriteTerminationStrategyClass) throws HoodieException { try { return StringUtils.isNullOrEmpty(postWriteTerminationStrategyClass) ? Option.empty() : Option.of((PostWriteTerminationStrategy) ReflectionUtils.loadClass(postWriteTerminationStrategyClass, properties)); } catch (Throwable e) { throw new HoodieException("Could not create PostWriteTerminationStrategy class " + postWriteTerminationStrategyClass, e); } }
3.68
AreaShop_WorldGuardRegionFlagsFeature_parseAccessSet
/** * Build a RegionAccessSet from an input that specifies player names, player UUIDs and groups. * @param input Input string defining the access set * @return RegionAccessSet containing the entities parsed from the input */ public RegionAccessSet parseAccessSet(String input) { RegionAccessSet result = new RegionAccessSet(); String[] inputParts = input.split(", "); for(String access : inputParts) { if(access != null && !access.isEmpty()) { // Check for groups if(access.startsWith("g:")) { if(access.length() > 2) { result.getGroupNames().add(access.substring(2)); } } else if(access.startsWith("n:")) { if(access.length() > 2) { result.getPlayerNames().add(access.substring(2)); } } else { try { result.getPlayerUniqueIds().add(UUID.fromString(access)); } catch(IllegalArgumentException e) { AreaShop.warn("Tried using '" + access + "' as uuid for a region member/owner, is your flagProfiles section correct?"); } } } } return result; }
3.68
flink_SavepointReader_readUnionState
/** * Read operator {@code UnionState} from a {@code Savepoint} when a custom serializer was used; * e.g., a different serializer than the one returned by {@code * TypeInformation#createSerializer}. * * @param identifier The identifier of the operator. * @param name The (unique) name for the state. * @param typeInfo The type of the elements in the state. * @param serializer The serializer used to write the elements into state. * @param <T> The type of the values that are in the union state. * @return A {@code DataStream} representing the elements in state. * @throws IOException If the savepoint path is invalid or the uid does not exist. */ public <T> DataStream<T> readUnionState( OperatorIdentifier identifier, String name, TypeInformation<T> typeInfo, TypeSerializer<T> serializer) throws IOException { return readUnionState(identifier, typeInfo, new ListStateDescriptor<>(name, serializer)); }
3.68
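A hedged sketch of calling the method above; the savepoint path, operator uid, state name and element type are all assumptions made for illustration.

    // Placeholders throughout; 'env' is a StreamExecutionEnvironment.
    SavepointReader savepoint = SavepointReader.read(env, "file:///tmp/savepoint-path");
    DataStream<Integer> restored = savepoint.readUnionState(
        OperatorIdentifier.forUid("buffering-operator"),
        "buffered-elements",
        Types.INT,
        IntSerializer.INSTANCE);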
flink_MailboxProcessor_allActionsCompleted
/** * This method must be called to end the stream task when all actions for the tasks have been * performed. */ public void allActionsCompleted() { sendPoisonMail( () -> { mailboxLoopRunning = false; suspended = true; }); }
3.68
hbase_HMaster_initClusterSchemaService
// Will be overridden in tests @InterfaceAudience.Private protected void initClusterSchemaService() throws IOException, InterruptedException { this.clusterSchemaService = new ClusterSchemaServiceImpl(this); this.clusterSchemaService.startAsync(); try { this.clusterSchemaService .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException toe) { throw new IOException("Timedout starting ClusterSchemaService", toe); } }
3.68
hadoop_AsyncDataService_writeAsync
/** * Write the data to HDFS asynchronously */ void writeAsync(OpenFileCtx openFileCtx) { if (LOG.isDebugEnabled()) { LOG.debug("Scheduling write back task for fileId: " + openFileCtx.getLatestAttr().getFileId()); } WriteBackTask wbTask = new WriteBackTask(openFileCtx); execute(wbTask); }
3.68
flink_SavepointWriter_newSavepoint
/** * Creates a new savepoint. * * @param executionEnvironment The stream execution environment used to build the savepoint. * @param stateBackend The state backend of the savepoint used for keyed state. * @param maxParallelism The max parallelism of the savepoint. * @return A {@link SavepointWriter}. * @see #newSavepoint(StreamExecutionEnvironment, int) */ public static SavepointWriter newSavepoint( StreamExecutionEnvironment executionEnvironment, StateBackend stateBackend, int maxParallelism) { return new SavepointWriter( createSavepointMetadata(maxParallelism), stateBackend, executionEnvironment); }
3.68
morf_DatabaseMetaDataProvider_isIgnoredTable
/** * Identify whether or not the specified table should be ignored in the metadata. This is * typically used to filter temporary tables. * * @param tableName The table which we are accessing. * @return <var>true</var> if the table should be ignored, false otherwise. */ protected boolean isIgnoredTable(@SuppressWarnings("unused") RealName tableName) { return false; }
3.68
hbase_BlockingRpcConnection_writeRequest
/** * Initiates a call by sending the parameter to the remote server. Note: this is not called from * the Connection thread, but by other threads. * @see #readResponse() */ private void writeRequest(Call call) throws IOException { ByteBuf cellBlock = null; try { cellBlock = this.rpcClient.cellBlockBuilder.buildCellBlock(this.codec, this.compressor, call.cells, PooledByteBufAllocator.DEFAULT); CellBlockMeta cellBlockMeta; if (cellBlock != null) { cellBlockMeta = CellBlockMeta.newBuilder().setLength(cellBlock.readableBytes()).build(); } else { cellBlockMeta = null; } RequestHeader requestHeader = buildRequestHeader(call, cellBlockMeta); setupIOstreams(); // Now we're going to write the call. We take the lock, then check that the connection // is still valid, and, if so we do the write to the socket. If the write fails, we don't // know where we stand, we have to close the connection. if (Thread.interrupted()) { throw new InterruptedIOException(); } calls.put(call.id, call); // We put first as we don't want the connection to become idle. // from here, we do not throw any exception to upper layer as the call has been tracked in // the pending calls map. try { call.callStats.setRequestSizeBytes(write(this.out, requestHeader, call.param, cellBlock)); } catch (Throwable t) { if (LOG.isTraceEnabled()) { LOG.trace("Error while writing {}", call.toShortString()); } IOException e = IPCUtil.toIOE(t); closeConn(e); return; } } finally { if (cellBlock != null) { cellBlock.release(); } } notifyAll(); }
3.68
framework_Escalator_measureMinCellWidth
/** * Gets the minimum width needed to display the cell properly. * * @param colIndex * index of column to measure * @param withContent * <code>true</code> if content is taken into account, * <code>false</code> if not * @return cell width needed for displaying correctly */ double measureMinCellWidth(int colIndex, boolean withContent) { assert isAttached() : "Can't measure max width of cell, since Escalator is not attached to the DOM."; double minCellWidth = -1; NodeList<TableRowElement> rows = root.getRows(); for (int row = 0; row < rows.getLength(); row++) { TableCellElement cell = rows.getItem(row).getCells() .getItem(colIndex); if (cell != null && !cellIsPartOfSpan(cell)) { double cellWidth = measureCellWidth(cell, withContent); minCellWidth = Math.max(minCellWidth, cellWidth); } } return minCellWidth; }
3.68
framework_VCalendarPanel_onSubmit
/** * Notifies submit-listeners of a submit event */ private void onSubmit() { if (getSubmitListener() != null) { getSubmitListener().onSubmit(); } }
3.68
flink_ColumnReferenceFinder_findWatermarkReferencedColumn
/** * Find referenced column names that derive the watermark expression. * * @param schema resolved columns contains the watermark expression. * @return the referenced column names */ public static Set<String> findWatermarkReferencedColumn(ResolvedSchema schema) { ColumnReferenceVisitor visitor = new ColumnReferenceVisitor(schema.getColumnNames()); return schema.getWatermarkSpecs().stream() .flatMap( spec -> Stream.concat( visitor.visit(spec.getWatermarkExpression()).stream(), Stream.of(spec.getRowtimeAttribute()))) .collect(Collectors.toSet()); }
3.68
morf_DatabaseMetaDataProvider_readViewName
/** * Retrieves view name from a result set. * * @param viewResultSet Result set to be read. * @return Name of the view. * @throws SQLException Upon errors. */ protected RealName readViewName(ResultSet viewResultSet) throws SQLException { String viewName = viewResultSet.getString(TABLE_NAME); return createRealName(viewName, viewName); }
3.68
hmily_TransactionContext_getCoordinator
/** * Gets coordinator. * * @return the coordinator */ public Coordinator getCoordinator() { return coordinator; }
3.68
flink_SourceTestSuiteBase_generateAndWriteTestData
/** * Generate a set of test records and write it to the given split writer. * * @param splitIndex Index of the split the records are written to * @param externalContext External context * @param testingSourceSettings Settings of the source under test * @return List of generated test records */ protected List<T> generateAndWriteTestData( int splitIndex, DataStreamSourceExternalContext<T> externalContext, TestingSourceSettings testingSourceSettings) { List<T> testRecords = externalContext.generateTestData( testingSourceSettings, splitIndex, ThreadLocalRandom.current().nextLong()); LOG.info( "Writing {} records for split {} to external system", testRecords.size(), splitIndex); externalContext .createSourceSplitDataWriter(testingSourceSettings) .writeRecords(testRecords); return testRecords; }
3.68
hadoop_QuotaUsage_getTypeQuota
/** * Return storage type quota. * * @param type storage type. * @return type quota. */ public long getTypeQuota(StorageType type) { return (typeQuota != null) ? typeQuota[type.ordinal()] : -1L; }
3.68
flink_DynamicEventTimeSessionWindows_withDynamicGap
/** * Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions * based on the element timestamp. * * @param sessionWindowTimeGapExtractor The extractor to use to extract the time gap from the * input elements * @return The policy. */ public static <T> DynamicEventTimeSessionWindows<T> withDynamicGap( SessionWindowTimeGapExtractor<T> sessionWindowTimeGapExtractor) { return new DynamicEventTimeSessionWindows<>(sessionWindowTimeGapExtractor); }
3.68
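A sketch of how the dynamic-gap assigner above is normally wired into a keyed stream; the Event type and its accessors are hypothetical placeholders.

    // Event, getKey() and getSessionGapMillis() are illustrative placeholders for the element type.
    DataStream<Event> events = ...; // obtained from a source elsewhere
    events
        .keyBy(Event::getKey)
        .window(DynamicEventTimeSessionWindows.withDynamicGap(
            (SessionWindowTimeGapExtractor<Event>) element -> element.getSessionGapMillis()))
        .reduce((a, b) -> b); // keep the last element of each dynamically-gapped session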
pulsar_GenericRecord_getField
/** * Retrieve the value of the provided <tt>field</tt>. * * @param field the field to retrieve the value * @return the value object */ default Object getField(Field field) { return getField(field.getName()); }
3.68
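A small sketch showing the default method above in context, iterating a consumed record's fields; getFields() belongs to the same Pulsar GenericRecord API, while 'message' is assumed to come from a consumer of Message<GenericRecord>.

    GenericRecord record = message.getValue();
    for (Field field : record.getFields()) {
        Object value = record.getField(field); // delegates to getField(field.getName())
        System.out.println(field.getName() + " = " + value);
    }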
flink_CommonExecSink_deriveSinkParallelism
/** * Returns the parallelism of sink operator, it assumes the sink runtime provider implements * {@link ParallelismProvider}. It returns parallelism defined in {@link ParallelismProvider} if * the parallelism is provided, otherwise it uses parallelism of input transformation. */ private int deriveSinkParallelism( Transformation<RowData> inputTransform, SinkRuntimeProvider runtimeProvider) { final int inputParallelism = inputTransform.getParallelism(); if (isParallelismConfigured(runtimeProvider)) { int sinkParallelism = ((ParallelismProvider) runtimeProvider).getParallelism().get(); if (sinkParallelism <= 0) { throw new TableException( String.format( "Invalid configured parallelism %s for table '%s'.", sinkParallelism, tableSinkSpec .getContextResolvedTable() .getIdentifier() .asSummaryString())); } return sinkParallelism; } else { return inputParallelism; } }
3.68
hadoop_WorkerId_toString
/** * Print workerId. * @return workerId as a string */ @Override public final String toString() { return workerId.toString(); }
3.68
framework_JsonPaintTarget_close
/** * Closes the paint target. Paint target must be closed before the * <code>getUIDL</code> can be called. Subsequent attempts to write to the paint * target will generate an exception. If the target was already closed, the call * to this function is ignored. * * @throws PaintException * if the paint operation failed. */ public void close() throws PaintException { if (tag != null) { uidlBuffer.write(tag.getJSON()); } flush(); closed = true; }
3.68
hudi_HoodieFileGroupReader_initRecordIterators
/** * Initialize internal iterators on the base and log files. */ public void initRecordIterators() { this.baseFileIterator = baseFilePath.isPresent() ? readerContext.getFileRecordIterator( baseFilePath.get().getHadoopPath(), start, length, readerState.baseFileAvroSchema, readerState.baseFileAvroSchema, hadoopConf) : new EmptyIterator<>(); scanLogFiles(); recordBuffer.setBaseFileIterator(baseFileIterator); }
3.68
rocketmq-connect_MetricsReporter_onCounterAdded
/** * Called when a {@link Counter} is added to the registry. * * @param name the counter's name * @param counter the counter */ public void onCounterAdded(String name, Counter counter) { this.onCounterAdded(MetricUtils.stringToMetricName(name), counter.getCount()); }
3.68
hudi_BaseTableMetadata_checkForSpuriousDeletes
/** * Handle spurious deletes. Depending on config, throw an exception or log a warn msg. */ private void checkForSpuriousDeletes(HoodieMetadataPayload metadataPayload, String partitionName) { if (!metadataPayload.getDeletions().isEmpty()) { if (metadataConfig.ignoreSpuriousDeletes()) { LOG.warn("Metadata record for " + partitionName + " encountered some files to be deleted which was not added before. " + "Ignoring the spurious deletes as the `" + HoodieMetadataConfig.IGNORE_SPURIOUS_DELETES.key() + "` config is set to true"); } else { throw new HoodieMetadataException("Metadata record for " + partitionName + " is inconsistent: " + metadataPayload); } } }
3.68
morf_SqlQueryDataSetProducer_records
/** * Returns an iterable of records contained in the {@link ResultSet}. */ @Override public Iterable<Record> records(String tableName) { Table table = getSchema().getTable(tableName); return new Iterable<Record>() { @Override public Iterator<Record> iterator() { ResultSetIterator resultSetIterator = new ResultSetIterator(table, query, connection, sqlDialect); openResultSets.add(resultSetIterator); return resultSetIterator; } }; }
3.68
hadoop_TFile_createScannerByKey
/** * Get a scanner that covers a specific key range. * * @param beginKey * Begin key of the scan (inclusive). If null, scan from the first * key-value entry of the TFile. * @param endKey * End key of the scan (exclusive). If null, scan up to the last * key-value entry of the TFile. * @return The actual coverage of the returned scanner will cover all keys * greater than or equal to the beginKey and less than the endKey. * @throws IOException raised on errors performing I/O. */ public Scanner createScannerByKey(RawComparable beginKey, RawComparable endKey) throws IOException { if ((beginKey != null) && (endKey != null) && (compareKeys(beginKey, endKey) >= 0)) { return new Scanner(this, beginKey, beginKey); } return new Scanner(this, beginKey, endKey); }
3.68
graphhopper_OSMReader_setFile
/** * Sets the OSM file to be read. Supported formats include .osm.xml, .osm.gz and .xml.pbf */ public OSMReader setFile(File osmFile) { this.osmFile = osmFile; return this; }
3.68
morf_SelectStatementBuilder_unionAll
/** * Perform an UNION set operation with another {@code selectStatement}, * keeping all duplicate rows. * * @param selectStatement the other select statement to be united with the current select statement; * @return this, for method chaining. * @see #union(SelectStatement) */ public SelectStatementBuilder unionAll(SelectStatement selectStatement) { setOperators.add(new UnionSetOperator(UnionStrategy.ALL, this.build(), selectStatement)); return this; }
3.68
querydsl_AbstractSQLClause_setParameters
/** * Set the parameters to the given PreparedStatement * * @param stmt preparedStatement to be populated * @param objects list of constants * @param constantPaths list of paths related to the constants * @param params map of param to value for param resolving */ protected void setParameters(PreparedStatement stmt, List<?> objects, List<Path<?>> constantPaths, Map<ParamExpression<?>, ?> params) { if (objects.size() != constantPaths.size()) { throw new IllegalArgumentException("Expected " + objects.size() + " paths, " + "but got " + constantPaths.size()); } for (int i = 0; i < objects.size(); i++) { Object o = objects.get(i); try { if (o instanceof ParamExpression) { if (!params.containsKey(o)) { throw new ParamNotSetException((ParamExpression<?>) o); } o = params.get(o); } configuration.set(stmt, constantPaths.get(i), i + 1, o); } catch (SQLException e) { throw configuration.translate(e); } } }
3.68
morf_ChangePrimaryKeyColumns_accept
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
framework_TablePushStreaming_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return null; }
3.68
flink_RestartAllFailoverStrategy_getTasksNeedingRestart
/** * Returns all vertices on any task failure. * * @param executionVertexId ID of the failed task * @param cause cause of the failure * @return set of IDs of vertices to restart */ @Override public Set<ExecutionVertexID> getTasksNeedingRestart( ExecutionVertexID executionVertexId, Throwable cause) { return IterableUtils.toStream(topology.getVertices()) .map(SchedulingExecutionVertex::getId) .collect(Collectors.toSet()); }
3.68
framework_DateField_handleUnparsableDateString
/** * This method is called to handle a non-empty date string from the client * if the client could not parse it as a Date. * * By default, a Converter.ConversionException is thrown, and the current * value is not modified. * * This can be overridden to handle conversions, to return null (equivalent * to empty input), to throw an exception or to fire an event. * * @param dateString * @return parsed Date * @throws Converter.ConversionException * to keep the old value and indicate an error */ protected Date handleUnparsableDateString(String dateString) throws Converter.ConversionException { currentParseErrorMessage = null; throw new Converter.ConversionException(getParseErrorMessage()); }
3.68
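One of the overriding options described in the Javadoc above, sketched as an anonymous subclass that treats unparsable input as an empty value instead of an error; the caption is a placeholder.

    DateField lenientDateField = new DateField("Date") {
        @Override
        protected Date handleUnparsableDateString(String dateString)
                throws Converter.ConversionException {
            return null; // documented as equivalent to empty input
        }
    };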
dubbo_ApplicationModel_allProviderModels
/** * @deprecated use {@link ServiceRepository#allProviderModels()} */ @Deprecated public static Collection<ProviderModel> allProviderModels() { return defaultModel().getApplicationServiceRepository().allProviderModels(); }
3.68
framework_Window_removeCloseShortcut
/** * Removes a close shortcut previously added with * {@link #addCloseShortcut(int, int...)}. * * @since 7.6 * @param keyCode * the keycode for invoking the shortcut * @param modifiers * the (optional) modifiers for invoking the shortcut. Can be set * to null to be explicit about not having modifiers. */ public void removeCloseShortcut(int keyCode, int... modifiers) { for (CloseShortcut shortcut : closeShortcuts) { if (shortcut.equals(keyCode, modifiers)) { removeAction(shortcut); closeShortcuts.remove(shortcut); return; } } }
3.68
hbase_MasterObserver_postGetTableNames
/** * Called after a getTableNames request has been processed. * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned * @param regex regular expression used for filtering the table names */ default void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx, List<TableDescriptor> descriptors, String regex) throws IOException { }
3.68
morf_TableBean_getName
/** * @see org.alfasoftware.morf.metadata.Table#getName() */ @Override public String getName() { return tableName; }
3.68
hadoop_CompositeService_addIfService
/** * If the passed object is an instance of {@link Service}, * add it to the list of services managed by this {@link CompositeService} * @param object object. * @return true if a service is added, false otherwise. */ protected boolean addIfService(Object object) { if (object instanceof Service) { addService((Service) object); return true; } else { return false; } }
3.68
Activiti_DefaultServiceTaskBehavior_execute
/** * We have two different implementation strategies that can be executed * depending on whether we have a connector action definition match or not. **/ @Override public void execute(DelegateExecution execution) { Connector connector = getConnector(getImplementation(execution)); IntegrationContext integrationContext = connector.apply(integrationContextBuilder.from(execution)); variablesPropagator.propagate(execution, integrationContext.getOutBoundVariables()); leave(execution); }
3.68
hbase_HMaster_filterTablesByRegex
/** * Removes the table descriptors that don't match the pattern. * @param descriptors list of table descriptors to filter * @param pattern the regex to use */ private static void filterTablesByRegex(final Collection<TableDescriptor> descriptors, final Pattern pattern) { final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR; Iterator<TableDescriptor> itr = descriptors.iterator(); while (itr.hasNext()) { TableDescriptor htd = itr.next(); String tableName = htd.getTableName().getNameAsString(); boolean matched = pattern.matcher(tableName).matches(); if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) { matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches(); } if (!matched) { itr.remove(); } } }
3.68
hudi_DynamoTableUtils_waitForTableDescription
/** * Wait for the table to reach the desired status and returns the table * description * * @param dynamo * Dynamo client to use * @param tableName * Table name to poll status of * @param desiredStatus * Desired {@link TableStatus} to wait for. If null this method * simply waits until DescribeTable returns something non-null * (i.e. any status) * @param timeout * Timeout in milliseconds to continue to poll for desired status * @param interval * Time to wait in milliseconds between poll attempts * @return Null if DescribeTables never returns a result, otherwise the * result of the last poll attempt (which may or may not have the * desired state) * @throws {@link * IllegalArgumentException} If timeout or interval is invalid */ private static TableDescription waitForTableDescription(final DynamoDbClient dynamo, final String tableName, TableStatus desiredStatus, final int timeout, final int interval) throws InterruptedException, IllegalArgumentException { if (timeout < 0) { throw new IllegalArgumentException("Timeout must be >= 0"); } if (interval <= 0 || interval >= timeout) { throw new IllegalArgumentException("Interval must be > 0 and < timeout"); } long startTime = System.currentTimeMillis(); long endTime = startTime + timeout; TableDescription table = null; while (System.currentTimeMillis() < endTime) { try { table = dynamo.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).table(); if (desiredStatus == null || table.tableStatus().equals(desiredStatus)) { return table; } } catch (ResourceNotFoundException rnfe) { // ResourceNotFound means the table doesn't exist yet, // so ignore this error and just keep polling. } Thread.sleep(interval); } return table; }
3.68
hadoop_DelegationBindingInfo_getCredentialProviders
/** * Get list of credential providers. * @return list of credential providers */ public AWSCredentialProviderList getCredentialProviders() { return credentialProviders; }
3.68
hbase_ZkSplitLogWorkerCoordination_areSplittersAvailable
/** Returns true if more splitters are available, otherwise false. */ private boolean areSplittersAvailable() { return maxConcurrentTasks - tasksInProgress.get() > 0; }
3.68
flink_ResourceManager_stopWorkerIfSupported
/** * Stops the given worker if supported. * * @param worker The worker. */ public void stopWorkerIfSupported(WorkerType worker) { if (resourceAllocator.isSupported()) { resourceAllocator.cleaningUpDisconnectedResource(worker.getResourceID()); } }
3.68
hbase_BlockingRpcConnection_handleConnectionFailure
/** * Handle connection failures. If the current number of retries is equal to the max number of * retries, stop retrying and throw the exception; otherwise back off for the configured interval and try connecting * again. This method is only called from inside setupIOstreams(), which is synchronized. Hence * the sleep is synchronized; the locks will be retained. * @param curRetries current number of retries * @param maxRetries max number of retries allowed * @param ioe failure reason * @throws IOException if max number of retries is reached */ private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe) throws IOException { closeSocket(); // throw the exception if the maximum number of retries is reached if (curRetries >= maxRetries || ExceptionUtil.isInterrupt(ioe)) { throw ioe; } // otherwise back off and retry try { Thread.sleep(this.rpcClient.failureSleep); } catch (InterruptedException ie) { ExceptionUtil.rethrowIfInterrupt(ie); } if (LOG.isInfoEnabled()) { LOG.info("Retrying connect to server: " + remoteId.getAddress() + " after sleeping " + this.rpcClient.failureSleep + "ms. Already tried " + curRetries + " time(s)."); } }
3.68
morf_SchemaEditor_addPrimaryKey
/** * Create the primary key of a table. * * @param tableName The original table name * @param newPrimaryKeyColumns The new primary key columns for the table. */ default void addPrimaryKey(String tableName, List<String> newPrimaryKeyColumns){ changePrimaryKeyColumns(tableName, Collections.emptyList(), newPrimaryKeyColumns); }
3.68
rocketmq-connect_WorkerSinkTask_doCommitSync
/** * do commit * * @param offsets * @param seqno */ private void doCommitSync(Map<MessageQueue, Long> offsets, int seqno) { log.debug("{} Committing offsets synchronously using sequence number {}: {}", this, seqno, offsets); try { for (Map.Entry<MessageQueue, Long> offsetEntry : offsets.entrySet()) { consumer.getOffsetStore().updateOffset(offsetEntry.getKey(), offsetEntry.getValue(), true); // consumer.getOffsetStore().updateConsumeOffsetToBroker(offsetEntry.getKey(), offsetEntry.getValue(), false); } onCommitCompleted(null, seqno, offsets); } catch (Exception e) { onCommitCompleted(e, seqno, offsets); } }
3.68
zxing_AlignmentPattern_aboutEquals
/** * <p>Determines if this alignment pattern "about equals" an alignment pattern at the stated * position and size -- meaning, it is at nearly the same center with nearly the same size.</p> */ boolean aboutEquals(float moduleSize, float i, float j) { if (Math.abs(i - getY()) <= moduleSize && Math.abs(j - getX()) <= moduleSize) { float moduleSizeDiff = Math.abs(moduleSize - estimatedModuleSize); return moduleSizeDiff <= 1.0f || moduleSizeDiff <= estimatedModuleSize; } return false; }
3.68
hudi_FileStatusUtils_safeReadAndSetMetadata
/** * Used to safely handle FileStatus calls which might fail on some FileSystem implementation. * (DeprecatedLocalFileSystem) */ private static void safeReadAndSetMetadata(HoodieFileStatus fStatus, FileStatus fileStatus) { try { fStatus.setOwner(fileStatus.getOwner()); fStatus.setGroup(fileStatus.getGroup()); fStatus.setPermission(fromFSPermission(fileStatus.getPermission())); } catch (IllegalArgumentException ie) { // Deprecated File System (testing) does not work well with this call // skipping } }
3.68
hbase_HBaseTestingUtility_getDifferentUser
/** * Creates a new user, based on the current user with the given suffix appended, for use when * getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos. * @param c Initial configuration * @param differentiatingSuffix Suffix to differentiate this user from others. * @return A new User with a differentiated name set into it. */ public static User getDifferentUser(final Configuration c, final String differentiatingSuffix) throws IOException { FileSystem currentfs = FileSystem.get(c); if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) { return User.getCurrent(); } // Else distributed filesystem. Make a new instance per daemon. Below // code is taken from the AppendTestUtil over in hdfs. String username = User.getCurrent().getName() + differentiatingSuffix; User user = User.createUserForTesting(c, username, new String[] { "supergroup" }); return user; }
3.68
hbase_HFilePrettyPrinter_newBuilder
/** * Returns a new {@link Builder} for {@link SimpleReporter}. * @return a {@link Builder} instance for a {@link SimpleReporter} */ public static Builder newBuilder() { return new Builder(); }
3.68
framework_AbstractDateFieldConnector_setAndUpdateAssistiveLabels
/** * Sets assistive labels for the calendar panel's navigation elements, and * updates these labels. * * @param calendar * the calendar panel for which to set the assistive labels * @since 8.4 */ @SuppressWarnings("rawtypes") protected void setAndUpdateAssistiveLabels( VAbstractCalendarPanel calendar) { calendar.setAssistiveLabelPreviousMonth(getState().assistiveLabels .get(AccessibleElement.PREVIOUS_MONTH)); calendar.setAssistiveLabelNextMonth( getState().assistiveLabels.get(AccessibleElement.NEXT_MONTH)); calendar.setAssistiveLabelPreviousYear(getState().assistiveLabels .get(AccessibleElement.PREVIOUS_YEAR)); calendar.setAssistiveLabelNextYear( getState().assistiveLabels.get(AccessibleElement.NEXT_YEAR)); calendar.updateAssistiveLabels(); }
3.68
flink_SafetyNetCloseableRegistry_doClose
/** * This implementation doesn't imply any exception during closing due to backward compatibility. */ @Override protected void doClose(List<Closeable> toClose) throws IOException { try { IOUtils.closeAllQuietly(toClose); } finally { synchronized (REAPER_THREAD_LOCK) { --GLOBAL_SAFETY_NET_REGISTRY_COUNT; if (0 == GLOBAL_SAFETY_NET_REGISTRY_COUNT) { REAPER_THREAD.interrupt(); REAPER_THREAD = null; } } } }
3.68
morf_SqlScriptExecutor_set
/** * @param value the value to set */ void set(T value) { this.value = value; }
3.68
flink_DataStreamUtils_collectBoundedStream
/** * Collects contents the given DataStream into a list, assuming that the stream is a bounded * stream. * * <p>This method blocks until the job execution is complete. By the time the method returns, * the job will have reached its FINISHED status. * * <p>Note that if the stream is unbounded, this method will never return and might fail with an * Out-of-Memory Error because it attempts to collect an infinite stream into a list. * * @throws Exception Exceptions that occur during the execution are forwarded. * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecated public static <E> List<E> collectBoundedStream(DataStream<E> stream, String jobName) throws Exception { final ArrayList<E> list = new ArrayList<>(); final Iterator<E> iter = collectWithClient(stream, jobName).iterator; while (iter.hasNext()) { list.add(iter.next()); } list.trimToSize(); return list; }
3.68
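Since the method above is deprecated, a short sketch of the replacement named in its Javadoc; 'stream' is assumed to be a bounded DataStream<String>.

    // executeAndCollect(limit) runs the job and pulls results back to the client.
    List<String> results = stream.executeAndCollect(100); // collects at most 100 elements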
framework_LoadingIndicatorConfiguration_getSecondDelay
/* * (non-Javadoc) * * @see com.vaadin.ui.LoadingIndicator#getSecondDelay() */ @Override public int getSecondDelay() { return getState(false).secondDelay; }
3.68
benchmark_PravegaBenchmarkTransactionProducer_probeRequested
/** * Indicates if producer probe had been requested by OpenMessaging benchmark. * * @param key - key provided to the probe. * @return true in case requested event had been created in context of producer probe. */ private boolean probeRequested(Optional<String> key) { // For the expected key, see: LocalWorker.probeProducers() final String expectedKey = "key"; return key.isPresent() && key.get().equals(expectedKey); }
3.68
morf_OracleDialect_getSqlForMonthsBetween
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForMonthsBetween(org.alfasoftware.morf.sql.element.AliasedField, org.alfasoftware.morf.sql.element.AliasedField) */ @Override protected String getSqlForMonthsBetween(AliasedField toDate, AliasedField fromDate) { String toDateStr = getSqlFrom(toDate); String fromDateStr = getSqlFrom(fromDate); return "(EXTRACT(YEAR FROM "+toDateStr+") - EXTRACT(YEAR FROM "+fromDateStr+")) * 12" + "+ (EXTRACT(MONTH FROM "+toDateStr+") - EXTRACT(MONTH FROM "+fromDateStr+"))" + "+ CASE WHEN "+toDateStr+" > "+fromDateStr + " THEN CASE WHEN EXTRACT(DAY FROM "+toDateStr+") >= EXTRACT(DAY FROM "+fromDateStr+") THEN 0" + " WHEN EXTRACT(MONTH FROM "+toDateStr+") <> EXTRACT(MONTH FROM "+toDateStr+" + 1) THEN 0" + " ELSE -1 END" + " ELSE CASE WHEN EXTRACT(MONTH FROM "+fromDateStr+") <> EXTRACT(MONTH FROM "+fromDateStr+" + 1) THEN 0" + " WHEN EXTRACT(DAY FROM "+fromDateStr+") >= EXTRACT(DAY FROM "+toDateStr+") THEN 0" + " ELSE 1 END" + " END" + "\n" ; }
3.68
pulsar_BrokerService_addUnAckedMessages
/** * If the per-broker unacked message count has reached its limit, blocks the dispatcher once its own unacked * message count exceeds {@link #maxUnackedMsgsPerDispatcher}. * * @param dispatcher * @param numberOfMessages */ public void addUnAckedMessages(PersistentDispatcherMultipleConsumers dispatcher, int numberOfMessages) { // don't block dispatchers if maxUnackedMessages = 0 if (maxUnackedMessages > 0) { totalUnackedMessages.add(numberOfMessages); // block dispatcher: the broker is already blocked and this dispatcher has exceeded its own limit while the // broker is blocked if (blockedDispatcherOnHighUnackedMsgs.get() && !dispatcher.isBlockedDispatcherOnUnackedMsgs() && dispatcher.getTotalUnackedMessages() > maxUnackedMsgsPerDispatcher) { lock.readLock().lock(); try { log.info("[{}] dispatcher reached to max unack msg limit on blocked-broker {}", dispatcher.getName(), dispatcher.getTotalUnackedMessages()); dispatcher.blockDispatcherOnUnackedMsgs(); blockedDispatchers.add(dispatcher); } finally { lock.readLock().unlock(); } } } }
3.68
morf_DatabaseUpgradePathValidationServiceImpl_selectUpgradeAuditTableCount
/** * Creates a select statement which can be used to count the number of upgrade steps that have already been run */ private SelectStatement selectUpgradeAuditTableCount() { TableReference upgradeAuditTable = tableRef(DatabaseUpgradeTableContribution.UPGRADE_AUDIT_NAME); return select(count(upgradeAuditTable.field("upgradeUUID"))) .from(upgradeAuditTable) .build(); }
3.68