Columns:
  name: string, length 12 to 178 characters
  code_snippet: string, length 8 to 36.5k characters
  score: float64, range 3.26 to 3.68
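A minimal sketch of reading rows with this schema, assuming a JSON-lines export and Jackson on the classpath; the record type, class name, and file name are illustrative, not part of the dataset:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical row type mirroring the schema above:
// name (string), code_snippet (string), score (float64).
record ScoredSnippet(String name, String code_snippet, double score) {}

class LoadExample {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper(); // assumes jackson-databind 2.12+
        // Assumed export format: one JSON object per line (JSONL).
        for (String line : Files.readAllLines(Path.of("snippets.jsonl"))) {
            ScoredSnippet row = mapper.readValue(line, ScoredSnippet.class);
            System.out.printf("%s -> %.2f%n", row.name(), row.score());
        }
    }
}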
hadoop_AWSClientIOException_retryable
/** * Query inner cause for retryability. * @return whether the inner cause is retryable. */ public boolean retryable() { return getCause().retryable(); }
3.68
hadoop_TimelineEntity_setStartTime
/** * Set the start time of the entity * * @param startTime * the start time of the entity */ public void setStartTime(Long startTime) { this.startTime = startTime; }
3.68
hadoop_BufferPool_releaseReadyBlock
/** * If no blocks were released after calling releaseDoneBlocks() a few times, * we may end up waiting forever. To avoid that situation, we try releasing * a 'ready' block farthest away from the given block. */ private synchronized void releaseReadyBlock(int blockNumber) { BufferData releaseTarget = null; for (BufferData data : getAll()) { if (data.stateEqualsOneOf(BufferData.State.READY)) { if (releaseTarget == null) { releaseTarget = data; } else { if (distance(data, blockNumber) > distance(releaseTarget, blockNumber)) { releaseTarget = data; } } } } if (releaseTarget != null) { LOG.warn("releasing 'ready' block: {}", releaseTarget); releaseTarget.setDone(); } }
3.68
framework_VaadinSession_setLastRequestTimestamp
/** * Sets the time when the last UIDL request was serviced in this session. * * @param timestamp * The time when the last request was handled, in milliseconds * since the epoch. * */ public void setLastRequestTimestamp(long timestamp) { assert hasLock(); lastRequestTimestamp = timestamp; }
3.68
hbase_TimestampsFilter_getTimestamps
/** Returns the list of timestamps */ public List<Long> getTimestamps() { List<Long> list = new ArrayList<>(timestamps.size()); list.addAll(timestamps); return list; }
3.68
hadoop_SysInfoWindows_getCumulativeCpuTime
/** {@inheritDoc} */ @Override public long getCumulativeCpuTime() { refreshIfNeeded(); return cumulativeCpuTimeMs; }
3.68
hbase_CatalogJanitorReport_isEmpty
/** Returns True if an 'empty' lastReport -- no problems found. */ public boolean isEmpty() { return this.holes.isEmpty() && this.overlaps.isEmpty() && this.unknownServers.isEmpty() && this.emptyRegionInfo.isEmpty(); }
3.68
hadoop_ServiceLauncher_createConfiguration
/** * Override point: create the base configuration for the service. * * Subclasses can override to create HDFS/YARN configurations etc. * @return the configuration to use as the service initializer. */ protected Configuration createConfiguration() { return new Configuration(); }
3.68
flink_SharedReference_consumeSync
/** * Executes the code on the referenced object in a synchronized fashion. Note that this method * is prone to deadlock if multiple references are accessed in a synchronized fashion in a * nested call-chain. */ default void consumeSync(Consumer<T> consumer) { T object = get(); synchronized (object) { consumer.accept(object); } }
3.68
flink_SkipListUtils_putLevelAndNodeStatus
/** * Puts the level and status to the key space. * * @param memorySegment memory segment for key space. * @param offset offset of key space in the memory segment. * @param level the level. * @param status the status. */ public static void putLevelAndNodeStatus( MemorySegment memorySegment, int offset, int level, NodeStatus status) { int data = ((status.getValue() & BYTE_MASK) << 8) | level; memorySegment.putInt(offset + SkipListUtils.KEY_META_OFFSET, data); }
3.68
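The packing arithmetic above (status byte shifted into bits 8-15, level in the low byte) is easy to verify with concrete values; a standalone sketch, independent of Flink's MemorySegment:

// Mirrors the packing in putLevelAndNodeStatus: status occupies the second
// byte, level the low byte of the int written to the key meta region.
public class LevelStatusPackingDemo {
    private static final int BYTE_MASK = 0xFF;

    public static void main(String[] args) {
        int level = 3;
        int status = 0xA5; // stand-in for NodeStatus.getValue()
        int data = ((status & BYTE_MASK) << 8) | level;

        System.out.printf("packed = 0x%04X%n", data);                 // packed = 0xA503
        System.out.println("level  = " + (data & BYTE_MASK));         // 3
        System.out.println("status = " + ((data >>> 8) & BYTE_MASK)); // 165
    }
}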
hbase_DateTieredCompactionPolicy_getCompactBoundariesForMajor
/**
 * Return a list of boundaries for multiple compaction output in ascending order.
 */
private List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact, long now) {
  long minTimestamp = filesToCompact.stream()
    .mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min().orElse(Long.MAX_VALUE);
  List<Long> boundaries = new ArrayList<>();
  // Add startMillis of all windows between now and min timestamp
  for (CompactionWindow window = getIncomingWindow(now);
    window.compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) {
    boundaries.add(window.startMillis());
  }
  boundaries.add(Long.MIN_VALUE);
  Collections.reverse(boundaries);
  return boundaries;
}
3.68
zilla_HpackContext_staticIndex13
// Index in static table for the given name of length 13
private static int staticIndex13(DirectBuffer name) {
    switch (name.getByte(12)) {
    case 'd':
        if (STATIC_TABLE[44].name.equals(name)) // last-modified
        {
            return 44;
        }
        break;
    case 'e':
        if (STATIC_TABLE[30].name.equals(name)) // content-range
        {
            return 30;
        }
        break;
    case 'h':
        if (STATIC_TABLE[41].name.equals(name)) // if-none-match
        {
            return 41;
        }
        break;
    case 'l':
        if (STATIC_TABLE[24].name.equals(name)) // cache-control
        {
            return 24;
        }
        break;
    case 'n':
        if (STATIC_TABLE[23].name.equals(name)) // authorization
        {
            return 23;
        }
        break;
    case 's':
        if (STATIC_TABLE[18].name.equals(name)) // accept-ranges
        {
            return 18;
        }
        break;
    }
    return -1;
}
3.68
hadoop_AMRunner_startAMFromSLSTrace
/** * Parse workload from a SLS trace file. */ private void startAMFromSLSTrace(String inputTrace) throws IOException { JsonFactory jsonF = new JsonFactory(); ObjectMapper mapper = new ObjectMapper(); try (Reader input = new InputStreamReader( new FileInputStream(inputTrace), StandardCharsets.UTF_8)) { JavaType type = mapper.getTypeFactory(). constructMapType(Map.class, String.class, String.class); Iterator<Map<String, String>> jobIter = mapper.readValues( jsonF.createParser(input), type); while (jobIter.hasNext()) { try { Map<String, String> jsonJob = jobIter.next(); AMDefinitionSLS amDef = AMDefinitionFactory.createFromSlsTrace(jsonJob, slsRunner); startAMs(amDef); } catch (Exception e) { LOG.error("Failed to create an AM: {}", e.getMessage()); } } } }
3.68
hbase_ActivePolicyEnforcement_getPolicyEnforcement
/**
 * Returns the proper {@link SpaceViolationPolicyEnforcement} implementation for the given table.
 * If the given table does not have a violation policy enforced, a "no-op" policy will be returned
 * which always allows an action.
 * @param tableName The table to fetch the policy for.
 * @return A non-null {@link SpaceViolationPolicyEnforcement} instance.
 */
public SpaceViolationPolicyEnforcement getPolicyEnforcement(TableName tableName) {
  SpaceViolationPolicyEnforcement policy = activePolicies.get(Objects.requireNonNull(tableName));
  if (policy == null) {
    synchronized (locallyCachedPolicies) {
      // When we don't have a policy enforcement for the table, there could be one of two cases:
      // 1) The table has no quota defined
      // 2) The table is not in violation of its quota
      // In both of these cases, we want to make sure that access remains fast and we minimize
      // object creation. We can accomplish this by locally caching policies instead of creating
      // a new instance of the policy each time.
      policy = locallyCachedPolicies.get(tableName);
      // We have already created/cached the enforcement, use it again. `activePolicies` and
      // `snapshots` are immutable, thus this policy is valid for the lifetime of `this`.
      if (policy != null) {
        return policy;
      }
      // Create a PolicyEnforcement for this table and snapshot. The snapshot may be null,
      // which is OK.
      policy = factory.createWithoutViolation(rss, tableName, snapshots.get(tableName));
      // Cache the policy we created
      locallyCachedPolicies.put(tableName, policy);
    }
  }
  return policy;
}
3.68
rocketmq-connect_ParsedSchema_deepEquals
/** * Deep equality check based on the underlying raw schema. * * @param schema the schema to compare against * @return true if the raw schemas are equal */ default boolean deepEquals(ParsedSchema schema) { return Objects.equals(rawSchema(), schema.rawSchema()); }
3.68
flink_NettyShuffleEnvironment_registerLegacyNetworkMetrics
/** * Registers legacy network metric groups before shuffle service refactoring. * * <p>Registers legacy metric groups if shuffle service implementation is original default one. * * @deprecated should be removed in future */ @SuppressWarnings("DeprecatedIsStillUsed") @Deprecated public void registerLegacyNetworkMetrics( MetricGroup metricGroup, ResultPartitionWriter[] producedPartitions, InputGate[] inputGates) { NettyShuffleMetricFactory.registerLegacyNetworkMetrics( config.isNetworkDetailedMetrics(), metricGroup, producedPartitions, inputGates); }
3.68
flink_DateTimeUtils_addMonths
/** * Adds a given number of months to a date, represented as the number of days since the epoch. */ public static int addMonths(int date, int m) { int y0 = (int) extractFromDate(TimeUnitRange.YEAR, date); int m0 = (int) extractFromDate(TimeUnitRange.MONTH, date); int d0 = (int) extractFromDate(TimeUnitRange.DAY, date); m0 += m; int deltaYear = (int) DateTimeUtils.floorDiv(m0, 12); y0 += deltaYear; m0 = (int) DateTimeUtils.floorMod(m0, 12); if (m0 == 0) { y0 -= 1; m0 += 12; } int last = lastDay(y0, m0); if (d0 > last) { d0 = last; } return ymdToUnixDate(y0, m0, d0); }
3.68
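The clamp to lastDay(y0, m0) is what keeps month-end arithmetic well defined. java.time applies the same clamping rule, which gives a quick cross-check (a sketch, not Flink test code):

import java.time.LocalDate;

public class AddMonthsCheck {
    public static void main(String[] args) {
        // Same clamp-to-last-day semantics as addMonths above:
        // Jan 31 + 1 month lands on the last day of February.
        System.out.println(LocalDate.of(2020, 1, 31).plusMonths(1)); // 2020-02-29
        System.out.println(LocalDate.of(2021, 1, 31).plusMonths(1)); // 2021-02-28
    }
}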
framework_Table_resetPageBuffer
/** * Clears the current page buffer. Call this before * {@link #refreshRenderedCells()} to ensure that all content is updated * from the properties. */ protected void resetPageBuffer() { firstToBeRenderedInClient = -1; lastToBeRenderedInClient = -1; reqFirstRowToPaint = -1; reqRowsToPaint = -1; pageBuffer = null; }
3.68
flink_TableChange_dropConstraint
/** * A table change to drop constraint. * * <p>It is equal to the following statement: * * <pre> * ALTER TABLE &lt;table_name&gt; DROP CONSTRAINT &lt;constraint_name&gt; * </pre> * * @param constraintName the constraint to drop. * @return a TableChange represents the modification. */ static DropConstraint dropConstraint(String constraintName) { return new DropConstraint(constraintName); }
3.68
hadoop_ConnectionPool_addConnection
/** * Add a connection to the current pool. It uses a Copy-On-Write approach. * * @param conn New connection to add to the pool. */ public synchronized void addConnection(ConnectionContext conn) { List<ConnectionContext> tmpConnections = new ArrayList<>(this.connections); tmpConnections.add(conn); this.connections = tmpConnections; this.lastActiveTime = Time.now(); }
3.68
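The copy-then-publish sequence above lets readers iterate the pool without locking: a captured snapshot is never mutated. A standalone sketch of the pattern; the volatile field and the demo types are ours, not Hadoop's:

import java.util.ArrayList;
import java.util.List;

// Standalone sketch of the copy-on-write pattern used by addConnection above:
// writers replace the list reference with an extended copy, so readers can
// iterate an old snapshot without locks or ConcurrentModificationException.
public class CopyOnWriteDemo {
    private volatile List<String> connections = new ArrayList<>();

    synchronized void addConnection(String conn) {
        List<String> tmp = new ArrayList<>(this.connections); // copy
        tmp.add(conn);                                        // write
        this.connections = tmp;                               // publish
    }

    int countSnapshot() {
        List<String> snapshot = this.connections; // capture once, then iterate freely
        int n = 0;
        for (String ignored : snapshot) {
            n++;
        }
        return n;
    }

    public static void main(String[] args) {
        CopyOnWriteDemo pool = new CopyOnWriteDemo();
        pool.addConnection("c1");
        pool.addConnection("c2");
        System.out.println(pool.countSnapshot()); // 2
    }
}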
AreaShop_RentRegion_getRentedUntil
/** * Get the time until this region is rented (time from 1970 epoch). * @return The epoch time until which this region is rented */ public long getRentedUntil() { return getLongSetting("rent.rentedUntil"); }
3.68
hudi_HoodieMetaSyncOperations_getPartitionsByFilter
/** * Get the metadata of partitions that belong to the specified table. * * @param tableName the table name * @param filter the partition filter expression * @return the matching partitions, or an empty list by default */ default List<Partition> getPartitionsByFilter(String tableName, String filter) { return Collections.emptyList(); }
3.68
hbase_MasterFileSystem_checkSubDir
/**
 * Make sure the directories under rootDir have good permissions. Create if necessary.
 */
private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
  FileSystem fs = p.getFileSystem(conf);
  FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
  if (!fs.exists(p)) {
    if (isSecurityEnabled) {
      if (!fs.mkdirs(p, secureRootSubDirPerms)) {
        throw new IOException("HBase directory '" + p + "' creation failure.");
      }
    } else {
      if (!fs.mkdirs(p)) {
        throw new IOException("HBase directory '" + p + "' creation failure.");
      }
    }
  }
  if (isSecurityEnabled && !dirPerms.equals(fs.getFileStatus(p).getPermission())) {
    // check whether the permissions match
    LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
      + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission() + ", expecting "
      + dirPerms + ". Automatically setting the permissions. "
      + "You can change the permissions by setting \"" + dirPermsConfName + "\" in hbase-site.xml "
      + "and restarting the master");
    fs.setPermission(p, dirPerms);
  }
}
3.68
streampipes_SupportedFormats_fstFormat
/** * Defines that a pipeline element (data processor or data sink) supports processing messaging * arriving in fast-serialization format * * @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}. */ public static TransportFormat fstFormat() { return new TransportFormat(MessageFormat.FST); }
3.68
framework_VTextField_isWorkPending
/** * {@inheritDoc} * * @since 7.7.5 */ @Override public boolean isWorkPending() { return scheduled; }
3.68
hadoop_CommitUtils_verifyIsMagicCommitPath
/** * Verify that the path is a magic one. * @param fs filesystem * @param path path * @throws PathCommitException if the path isn't a magic commit path */ public static void verifyIsMagicCommitPath(S3AFileSystem fs, Path path) throws PathCommitException { verifyIsMagicCommitFS(fs); if (!fs.isMagicCommitPath(path)) { throw new PathCommitException(path, E_BAD_PATH); } }
3.68
flink_FormatDescriptor_build
/** Returns an immutable instance of {@link FormatDescriptor}. */ public FormatDescriptor build() { return new FormatDescriptor(format, options); }
3.68
flink_FieldParser_setCharset
/** * Sets the character set used for this parser. * * @param charset charset used for this parser. */ public void setCharset(Charset charset) { this.charset = charset; }
3.68
hbase_MultiByteBuff_put
/** Copies from the given byte[] to this MBB. */ @Override public MultiByteBuff put(byte[] src, int offset, int length) { checkRefCount(); if (this.curItem.remaining() >= length) { ByteBufferUtils.copyFromArrayToBuffer(this.curItem, src, offset, length); return this; } int end = offset + length; for (int i = offset; i < end; i++) { this.put(src[i]); } return this; }
3.68
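The method above takes a bulk fast path when the current buffer has room and otherwise falls back to a per-byte loop that can cross buffer boundaries. A standalone sketch of that two-path structure using a plain ByteBuffer (not HBase code):

import java.nio.ByteBuffer;

// Illustrates the two-path structure of MultiByteBuff.put above: bulk-copy
// when the current buffer has room, else a byte-by-byte fallback that, in the
// real class, advances across the underlying buffers.
public class TwoPathCopyDemo {
    public static void main(String[] args) {
        byte[] src = {1, 2, 3, 4, 5};
        ByteBuffer current = ByteBuffer.allocate(8);
        if (current.remaining() >= src.length) {
            current.put(src, 0, src.length); // fast path: single bulk copy
        } else {
            for (byte b : src) {
                current.put(b);              // slow path: per-byte copy
            }
        }
        System.out.println("position after copy = " + current.position()); // 5
    }
}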
morf_SpreadsheetDataSetProducer_getRecords
/**
 * Get all the records from the given Excel sheet.
 *
 * @param sheet worksheet to get records from
 * @return the extracted records
 */
private List<Record> getRecords(Sheet sheet) {
  try {
    long id = 1;
    int row = findHeaderRow(sheet);

    // Get the column headings
    final Map<String, Integer> columnHeadingsMap = new HashMap<>();
    for (int i = 0; i < countHeadings(sheet, row); i++) {
      columnHeadingsMap.put(columnName(sheet.getCell(i, row).getContents()), i);
    }

    // Does this sheet have translations or not?
    final int translationColumn = getTranslationsColumnIndex(sheet, row);

    // -- Now get the data...
    //
    row++; // The data is always one row below the headings
    List<Record> records = new LinkedList<>();
    for (; row < sheet.getRows(); row++) {
      final Cell[] cells = sheet.getRow(row);
      // If all the cells are blank then this is the end of the table
      if (allBlank(cells)) {
        break;
      }
      records.add(createRecord(id++, columnHeadingsMap, translationColumn, cells));
    }
    return records;
  } catch (Exception e) {
    throw new RuntimeException("Failed to parse worksheet [" + sheet.getName() + "]", e);
  }
}
3.68
hbase_RequestConverter_buildSetNormalizerRunningRequest
/** * Creates a protocol buffer SetNormalizerRunningRequest * @return a SetNormalizerRunningRequest */ public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) { return SetNormalizerRunningRequest.newBuilder().setOn(on).build(); }
3.68
hadoop_OpportunisticContainerContext_addToOutstandingReqs
/**
 * Takes a list of ResourceRequests (asks), extracts the key information viz.
 * (Priority, ResourceName, Capability) and adds to the outstanding
 * OPPORTUNISTIC outstandingOpReqs map. The nested map is required to enforce
 * the current YARN constraint that only a single ResourceRequest can exist at
 * a given Priority and Capability.
 *
 * @param resourceAsks the list with the {@link ResourceRequest}s
 */
public void addToOutstandingReqs(List<ResourceRequest> resourceAsks) {
  for (ResourceRequest request : resourceAsks) {
    SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
    Map<Resource, EnrichedResourceRequest> reqMap =
        outstandingOpReqs.get(schedulerKey);
    if (reqMap == null) {
      reqMap = new HashMap<>();
      outstandingOpReqs.put(schedulerKey, reqMap);
    }
    EnrichedResourceRequest eReq = reqMap.get(request.getCapability());
    if (eReq == null) {
      eReq = new EnrichedResourceRequest(request);
      reqMap.put(request.getCapability(), eReq);
    }
    // Set numContainers only for ANY request
    if (ResourceRequest.isAnyLocation(request.getResourceName())) {
      eReq.getRequest().setResourceName(ResourceRequest.ANY);
      eReq.getRequest().setNumContainers(request.getNumContainers());
    } else {
      eReq.addLocation(request.getResourceName(), request.getNumContainers());
    }
    if (ResourceRequest.isAnyLocation(request.getResourceName())) {
      LOG.info("# of outstandingOpReqs in ANY (at priority = "
          + schedulerKey.getPriority() + ", allocationReqId = "
          + schedulerKey.getAllocationRequestId() + ", with capability = "
          + request.getCapability() + ", with location = "
          + request.getResourceName() + ") : numContainers = "
          + eReq.getRequest().getNumContainers());
    }
  }
}
3.68
hadoop_ItemInfo_increRetryCount
/** * Increments the retry count. */ public void increRetryCount() { this.retryCount++; }
3.68
hbase_RingBufferTruck_unloadAppend
/** * Unload the truck of its {@link FSWALEntry} payload. The internal reference is released. */ FSWALEntry unloadAppend() { FSWALEntry entry = this.entry; this.entry = null; this.type = Type.EMPTY; return entry; }
3.68
flink_FlinkContainers_builder
/** Creates a builder for {@link FlinkContainers}. */ public static Builder builder() { return new Builder(); }
3.68
flink_JobResult_createFrom
/**
 * Creates the {@link JobResult} from the given {@link AccessExecutionGraph} which must be in a
 * globally terminal state.
 *
 * @param accessExecutionGraph to create the JobResult from
 * @return JobResult of the given AccessExecutionGraph
 */
public static JobResult createFrom(AccessExecutionGraph accessExecutionGraph) {
    final JobID jobId = accessExecutionGraph.getJobID();
    final JobStatus jobStatus = accessExecutionGraph.getState();

    checkArgument(
            jobStatus.isTerminalState(),
            "The job " + accessExecutionGraph.getJobName() + '(' + jobId + ") is not in a "
                    + "terminal state. It is in state " + jobStatus + '.');

    final JobResult.Builder builder = new JobResult.Builder();
    builder.jobId(jobId);
    builder.applicationStatus(ApplicationStatus.fromJobStatus(accessExecutionGraph.getState()));

    final long netRuntime =
            accessExecutionGraph.getStatusTimestamp(jobStatus)
                    - accessExecutionGraph.getStatusTimestamp(JobStatus.INITIALIZING);
    // guard against clock changes
    final long guardedNetRuntime = Math.max(netRuntime, 0L);
    builder.netRuntime(guardedNetRuntime);
    builder.accumulatorResults(accessExecutionGraph.getAccumulatorsSerialized());

    if (jobStatus == JobStatus.FAILED) {
        final ErrorInfo errorInfo = accessExecutionGraph.getFailureInfo();
        checkNotNull(errorInfo, "No root cause is found for the job failure.");
        builder.serializedThrowable(errorInfo.getException());
    }

    return builder.build();
}
3.68
hadoop_StringKeyConverter_decode
/* * (non-Javadoc) * * @see * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter * #decode(byte[]) */ @Override public String decode(byte[] bytes) { return Separator.decode(bytes, Separator.TAB, Separator.SPACE); }
3.68
flink_ManuallyTriggeredScheduledExecutorService_triggerAll
/** Triggers all {@code queuedRunnables}. */ public void triggerAll() { while (numQueuedRunnables() > 0) { trigger(); } }
3.68
framework_VScrollTable_updateTotalRows
/** For internal use only. May be removed or replaced in the future. */ public void updateTotalRows(UIDL uidl) { int newTotalRows = uidl.getIntAttribute("totalrows"); if (newTotalRows != getTotalRows()) { if (scrollBody != null) { if (getTotalRows() == 0) { tHead.clear(); tFoot.clear(); } initializedAndAttached = false; initialContentReceived = false; isNewBody = true; } setTotalRows(newTotalRows); } }
3.68
hadoop_Find_getOptions
/** Returns the current find options, creating them if necessary. */ @InterfaceAudience.Private FindOptions getOptions() { if (options == null) { options = createOptions(); } return options; }
3.68
hmily_JavaBeanBinder_setValue
/** * Sets value. * * @param instance the instance * @param value the value */ void setValue(final Supplier<?> instance, final Object value) { try { this.setter.setAccessible(true); this.setter.invoke(instance.get(), value); } catch (Exception ex) { throw new IllegalStateException("Unable to set value for property " + this.name, ex); } }
3.68
hadoop_TaskPool_castAndThrow
/** * Raise an exception of the declared type. * This method never completes normally. * @param e exception * @param <E> class of exceptions * @throws E a recast exception. */ @SuppressWarnings("unchecked") private static <E extends Exception> void castAndThrow(Exception e) throws E { if (e instanceof RuntimeException) { throw (RuntimeException) e; } throw (E) e; }
3.68
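This is the well-known "sneaky throw" idiom: because generics are erased, the cast to E never happens at runtime, so a checked exception can be rethrown without appearing in the method's throws clause. A self-contained sketch of the same trick (class name and demo are ours):

public class SneakyThrowDemo {
    @SuppressWarnings("unchecked")
    private static <E extends Exception> void castAndThrow(Exception e) throws E {
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw (E) e; // erased at runtime: no ClassCastException occurs
    }

    public static void main(String[] args) {
        try {
            // Rethrow a checked IOException without declaring 'throws IOException':
            // the explicit type argument makes E = RuntimeException at this call site.
            SneakyThrowDemo.<RuntimeException>castAndThrow(
                    new java.io.IOException("disk failure"));
        } catch (Exception caught) {
            System.out.println("caught: " + caught); // caught: java.io.IOException: disk failure
        }
    }
}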
hbase_FavoredNodesPlan_removeFavoredNodes
/** * Remove a favored node assignment * @param region the region whose favored node assignment is removed * @return the list of favored region servers for this region based on the plan */ List<ServerName> removeFavoredNodes(RegionInfo region) { return favoredNodesMap.remove(region.getRegionNameAsString()); }
3.68
flink_Tuple10_copy
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> copy() { return new Tuple10<>( this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9); }
3.68
flink_WindowListState_add
/** * Updates the operator state accessible by {@link #get(W)} by adding the given value to the * list of values. The next time {@link #get(W)} is called (for the same state partition) the * returned state will represent the updated list. * * <p>If null is passed in, the state value will remain unchanged. * * @param window The namespace for the state. * @param value The new value for the state. * @throws Exception Thrown if the system cannot access the state. */ public void add(W window, RowData value) throws Exception { windowState.setCurrentNamespace(window); windowState.add(value); }
3.68
cron-utils_ValidationFieldExpressionVisitor_isPeriodInRange
/** * Check if given period is compatible with range. * * @param fieldValue - to be validated * @throws IllegalArgumentException - if not in range */ @VisibleForTesting protected void isPeriodInRange(final FieldValue<?> fieldValue) { if (fieldValue instanceof IntegerFieldValue) { final int value = ((IntegerFieldValue) fieldValue).getValue(); if (!constraints.isPeriodInRange(value)) { throw new IllegalArgumentException( String.format("Period %s not in range [%s, %s]", value, constraints.getStartRange(), constraints.getEndRange())); } } }
3.68
flink_PatternStream_select
/** * Applies a select function to the detected pattern sequence. For each pattern sequence the * provided {@link PatternSelectFunction} is called. The pattern select function can produce * exactly one resulting element. * * <p>Applies a timeout function to a partial pattern sequence which has timed out. For each * partial pattern sequence the provided {@link PatternTimeoutFunction} is called. The pattern * timeout function can produce exactly one resulting element. * * @param patternTimeoutFunction The pattern timeout function which is called for each partial * pattern sequence which has timed out. * @param patternSelectFunction The pattern select function which is called for each detected * pattern sequence. * @param <L> Type of the resulting timeout elements * @param <R> Type of the resulting elements * @deprecated Use {@link PatternStream#select(OutputTag, PatternTimeoutFunction, * PatternSelectFunction)} that returns timed out events as a side-output * @return {@link DataStream} which contains the resulting elements or the resulting timeout * elements wrapped in an {@link Either} type. */ @Deprecated public <L, R> SingleOutputStreamOperator<Either<L, R>> select( final PatternTimeoutFunction<T, L> patternTimeoutFunction, final PatternSelectFunction<T, R> patternSelectFunction) { final TypeInformation<R> mainTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternSelectFunction, PatternSelectFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); final TypeInformation<L> timeoutTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternTimeoutFunction, PatternTimeoutFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); final TypeInformation<Either<L, R>> outTypeInfo = new EitherTypeInfo<>(timeoutTypeInfo, mainTypeInfo); final OutputTag<L> outputTag = new OutputTag<>(UUID.randomUUID().toString(), timeoutTypeInfo); final PatternProcessFunction<T, R> processFunction = fromSelect(builder.clean(patternSelectFunction)) .withTimeoutHandler(outputTag, builder.clean(patternTimeoutFunction)) .build(); final SingleOutputStreamOperator<R> mainStream = process(processFunction, mainTypeInfo); final DataStream<L> timedOutStream = mainStream.getSideOutput(outputTag); return mainStream.connect(timedOutStream).map(new CoMapTimeout<>()).returns(outTypeInfo); }
3.68
dubbo_ReflectionServiceDescriptor_getMethod
/** * Does not use Optional as return type to avoid a potential performance decrease. * * @param methodName the method name * @param paramTypes the parameter types * @return the matching method descriptor, or null if none matches */ public MethodDescriptor getMethod(String methodName, Class<?>[] paramTypes) { List<MethodDescriptor> methodModels = methods.get(methodName); if (CollectionUtils.isNotEmpty(methodModels)) { for (MethodDescriptor descriptor : methodModels) { if (Arrays.equals(paramTypes, descriptor.getParameterClasses())) { return descriptor; } } } return null; }
3.68
flink_CopyOnWriteSkipListStateMap_updateValueWithCopyOnWrite
/**
 * Update the value of the node with copy-on-write mode. The old value will be linked after the
 * new value, and can still be accessed.
 *
 * @param node the node to update.
 * @param value the value.
 * @return the old value pointer.
 */
private long updateValueWithCopyOnWrite(long node, byte[] value) {
    // a null value indicates this is a removed node
    int valueSize = value == null ? 0 : value.length;
    int totalValueLen = SkipListUtils.getValueMetaLen() + valueSize;
    long valuePointer = allocateSpace(totalValueLen);

    Node nodeStorage = getNodeSegmentAndOffset(node);
    MemorySegment nodeSegment = nodeStorage.nodeSegment;
    int offsetInNodeSegment = nodeStorage.nodeOffset;
    long oldValuePointer = SkipListUtils.getValuePointer(nodeSegment, offsetInNodeSegment);

    doWriteValue(valuePointer, value, stateMapVersion, node, oldValuePointer);

    // update the value pointer in the node after the new value points to the old value,
    // so that the old value can still be accessed concurrently
    SkipListUtils.putValuePointer(nodeSegment, offsetInNodeSegment, valuePointer);

    return oldValuePointer;
}
3.68
framework_TabSheetClose_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "When all tabs have not been closed, at least one tab should be visible. "; }
3.68
flink_DeltaIterationBase_setBroadcastVariables
/** * The DeltaIteration meta operator cannot have broadcast inputs. This method always throws an * exception. * * @param inputs Ignored */ public <X> void setBroadcastVariables(Map<String, Operator<X>> inputs) { throw new UnsupportedOperationException( "The DeltaIteration meta operator cannot have broadcast inputs."); }
3.68
framework_MenuBarFocus_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "This test checks if you can focus a menu bar on the client from the server side"; }
3.68
hbase_StoreFileReader_getFilterEntries
/** * The number of Bloom filter entries in this store file, or an estimate thereof, if the Bloom * filter is not loaded. This always returns an upper bound of the number of Bloom filter entries. * @return an estimate of the number of Bloom filter entries in this file */ public long getFilterEntries() { return generalBloomFilter != null ? generalBloomFilter.getKeyCount() : reader.getEntries(); }
3.68
hadoop_AbfsOutputStreamStatisticsImpl_toString
/** * String to show AbfsOutputStream statistics values in AbfsOutputStream. * * @return String with AbfsOutputStream statistics. */ @Override public String toString() { final StringBuilder outputStreamStats = new StringBuilder( "OutputStream Statistics{"); outputStreamStats.append(ioStatisticsStore.toString()); outputStreamStats.append("}"); return outputStreamStats.toString(); }
3.68
hbase_StoreUtils_getLowestTimestamp
/** * Gets lowest timestamp from candidate StoreFiles */ public static long getLowestTimestamp(Collection<HStoreFile> candidates) throws IOException { long minTs = Long.MAX_VALUE; for (HStoreFile storeFile : candidates) { minTs = Math.min(minTs, storeFile.getModificationTimestamp()); } return minTs; }
3.68
dubbo_FutureContext_setCompatibleFuture
/** * Guarantee 'using org.apache.dubbo.rpc.RpcContext.getFuture() before proxy returns' can work, a typical scenario is: * <pre>{@code * public final class TracingFilter implements Filter { * public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException { * Result result = invoker.invoke(invocation); * Future<Object> future = rpcContext.getFuture(); * if (future instanceof FutureAdapter) { * ((FutureAdapter) future).getFuture().setCallback(new FinishSpanCallback(span)); * } * ...... * } * } * }</pre> * * Start from 2.7.3, you don't have to get Future from RpcContext, we recommend using Result directly: * <pre>{@code * public final class TracingFilter implements Filter { * public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException { * Result result = invoker.invoke(invocation); * result.getResponseFuture().whenComplete(new FinishSpanCallback(span)); * ...... * } * } * }</pre> * */ @Deprecated public void setCompatibleFuture(CompletableFuture<?> compatibleFuture) { this.compatibleFuture = compatibleFuture; if (compatibleFuture != null) { this.setFuture(new FutureAdapter(compatibleFuture)); } }
3.68
AreaShop_FileManager_loadConfigFile
/**
 * Load the default.yml file
 * @return true if it has been loaded successfully, otherwise false
 */
public boolean loadConfigFile() {
    boolean result = true;
    File configFile = new File(configPath);
    // Save the file from the jar to disk if it does not exist
    if(!configFile.exists()) {
        try(
                InputStream input = plugin.getResource(AreaShop.configFile);
                OutputStream output = new FileOutputStream(configFile)
        ) {
            int read;
            byte[] bytes = new byte[1024];
            while((read = input.read(bytes)) != -1) {
                output.write(bytes, 0, read);
            }
            AreaShop.info("Default config file has been saved, should only happen on first startup");
        } catch(IOException e) {
            AreaShop.warn("Something went wrong saving the config file: " + configFile.getAbsolutePath());
        }
    }
    // Load config.yml from the plugin folder
    try(
            InputStreamReader custom = new InputStreamReader(new FileInputStream(configFile), Charsets.UTF_8);
            InputStreamReader normal = new InputStreamReader(plugin.getResource(AreaShop.configFile), Charsets.UTF_8);
            InputStreamReader hidden = new InputStreamReader(plugin.getResource(AreaShop.configFileHidden), Charsets.UTF_8)
    ) {
        config = YamlConfiguration.loadConfiguration(custom);
        if(config.getKeys(false).isEmpty()) {
            AreaShop.warn("File 'config.yml' is empty, check for errors in the log.");
            result = false;
        } else {
            config.addDefaults(YamlConfiguration.loadConfiguration(normal));
            config.addDefaults(YamlConfiguration.loadConfiguration(hidden));
            // Set the debug and chatprefix variables
            plugin.setDebug(this.getConfig().getBoolean("debug"));
            if(getConfig().isList("chatPrefix")) {
                plugin.setChatprefix(getConfig().getStringList("chatPrefix"));
            } else {
                ArrayList<String> list = new ArrayList<>();
                list.add(getConfig().getString("chatPrefix"));
                plugin.setChatprefix(list);
            }
        }
    } catch(IOException e) {
        AreaShop.warn("Something went wrong while reading the config.yml file: " + configFile.getAbsolutePath());
        result = false;
    }
    Utils.initialize(config);
    return result;
}
3.68
hadoop_AuxiliaryService_getAuxiliaryLocalPathHandler
/** * Method that gets the local dirs path handler for this Auxiliary Service. * * @return auxiliaryPathHandler object that is used to read from and write to * valid local Dirs. */ public AuxiliaryLocalPathHandler getAuxiliaryLocalPathHandler() { return this.auxiliaryLocalPathHandler; }
3.68
flink_DefaultFailureEnricherContext_forGlobalFailure
/** Factory method returning a Global failure Context for the given params. */ public static Context forGlobalFailure( JobID jobID, String jobName, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) { return new DefaultFailureEnricherContext( jobID, jobName, metricGroup, FailureType.GLOBAL, ioExecutor, classLoader); }
3.68
hbase_HRegionServer_triggerFlushInPrimaryRegion
/**
 * Trigger a flush in the primary region replica if this region is a secondary replica. Does not
 * block this thread. See RegionReplicaFlushHandler for details.
 */
private void triggerFlushInPrimaryRegion(final HRegion region) {
  if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
    return;
  }
  TableName tn = region.getTableDescriptor().getTableName();
  if (
    !ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf, tn)
      || !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(region.conf) ||
      // If memstore replication is not set up, we do not have to wait for observing a flush
      // event from the primary before starting to serve reads, because gaps from replication
      // are not applicable. This logic comes from
      // TableDescriptorBuilder.ModifyableTableDescriptor.setRegionMemStoreReplication by
      // HBASE-13063.
      !region.getTableDescriptor().hasRegionMemStoreReplication()
  ) {
    region.setReadsEnabled(true);
    return;
  }
  region.setReadsEnabled(false); // disable reads before marking the region as opened.
  // RegionReplicaFlushHandler might reset this.

  // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler
  if (this.executorService != null) {
    this.executorService.submit(new RegionReplicaFlushHandler(this, region));
  } else {
    LOG.info("Executor is null; not running flush of primary region replica for {}",
      region.getRegionInfo());
  }
}
3.68
hadoop_SQLDelegationTokenSecretManager_getDelegationTokenSeqNum
/** * Obtains the value of the last reserved sequence number. * @return Last reserved sequence number. */ @Override public int getDelegationTokenSeqNum() { try { return selectSequenceNum(); } catch (SQLException e) { throw new RuntimeException( "Failed to get token sequence number in SQL secret manager", e); } }
3.68
shardingsphere-elasticjob_JobRegistry_setCurrentShardingTotalCount
/** * Set sharding total count which running on current job server. * * @param jobName job name * @param currentShardingTotalCount sharding total count which running on current job server */ public void setCurrentShardingTotalCount(final String jobName, final int currentShardingTotalCount) { currentShardingTotalCountMap.put(jobName, currentShardingTotalCount); }
3.68
framework_ComponentLocator_getElementByPath
/** * Locates an element using a String locator (path) which identifies a DOM * element. The {@link #getPathForElement(Element)} method can be used for * the inverse operation, i.e. generating a string expression for a DOM * element. * * @since 5.4 * @param path * The String locator which identifies the target element. * @return The DOM element identified by {@code path} or null if the element * could not be located. */ public com.google.gwt.user.client.Element getElementByPath(String path) { for (LocatorStrategy strategy : locatorStrategies) { if (strategy.validatePath(path)) { Element element = strategy.getElementByPath(path); if (null != element) { return DOM.asOld(element); } } } return null; }
3.68
hibernate-validator_TokenIterator_replaceCurrentInterpolationTerm
/** * Replaces the current interpolation term with the given string. * * @param replacement The string to replace the current term with. */ public void replaceCurrentInterpolationTerm(String replacement) { Token token = new Token( replacement ); token.terminate(); tokenList.set( currentPosition - 1, token ); }
3.68
rocketmq-connect_JsonConverter_convertToJsonWithEnvelope
/** * Convert a value to JSON wrapped in a schema envelope. * * @param schema the schema of the value * @param value the value to convert * @return a JSON object carrying both the schema and the converted payload */ private JSONObject convertToJsonWithEnvelope(Schema schema, Object value) { return new JsonSchema.Envelope( asJsonSchema(schema), convertToJson(schema, value) ).toJsonNode(); }
3.68
flink_CompilerHints_addUniqueField
/** * Adds a field as having only unique values. * * @param field The field with unique values. */ public void addUniqueField(int field) { if (this.uniqueFields == null) { this.uniqueFields = new HashSet<FieldSet>(); } this.uniqueFields.add(new FieldSet(field)); }
3.68
flink_MemorySegment_putShort
/**
 * Writes the given short value into this buffer at the given position, using the native byte
 * order of the system.
 *
 * @param index The position at which the value will be written.
 * @param value The short value to be written.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 2.
 */
public void putShort(int index, short value) {
    final long pos = address + index;
    if (index >= 0 && pos <= addressLimit - 2) {
        UNSAFE.putShort(heapMemory, pos, value);
    } else if (address > addressLimit) {
        throw new IllegalStateException("segment has been freed");
    } else {
        // index is in fact invalid
        throw new IndexOutOfBoundsException();
    }
}
3.68
flink_MemoryManager_reserveMemory
/** * Reserves a memory chunk of a certain size for an owner from this memory manager. * * @param owner The owner to associate with the memory reservation, for the fallback release. * @param size size of memory to reserve. * @throws MemoryReservationException Thrown, if this memory manager does not have the requested * amount of memory any more. */ public void reserveMemory(Object owner, long size) throws MemoryReservationException { checkMemoryReservationPreconditions(owner, size); if (size == 0L) { return; } memoryBudget.reserveMemory(size); reservedMemory.compute( owner, (o, memoryReservedForOwner) -> memoryReservedForOwner == null ? size : memoryReservedForOwner + size); Preconditions.checkState(!isShutDown, "Memory manager has been concurrently shut down."); }
3.68
querydsl_ColumnMetadata_getName
/** * Extract the column name for the given path; returns the path name if no ColumnMetadata is attached * * @param path path * @return column name or path name */ public static String getName(Path<?> path) { Path<?> parent = path.getMetadata().getParent(); if (parent instanceof EntityPath) { Object columnMetadata = ((EntityPath<?>) parent).getMetadata(path); if (columnMetadata instanceof ColumnMetadata) { return ((ColumnMetadata) columnMetadata).getName(); } } return path.getMetadata().getName(); }
3.68
hadoop_RouterClientRMService_createRouterRMDelegationTokenSecretManager
/** * Create RouterRMDelegationTokenSecretManager. * In the YARN federation, the Router will replace the RM to * manage the RMDelegationToken (generate, update, cancel), * so the relevant configuration parameters still obtain the configuration parameters of the RM. * * @param conf Configuration * @return RouterDelegationTokenSecretManager. */ protected RouterDelegationTokenSecretManager createRouterRMDelegationTokenSecretManager( Configuration conf) { long secretKeyInterval = conf.getLong( YarnConfiguration.RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY, YarnConfiguration.RM_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT); long tokenMaxLifetime = conf.getLong( YarnConfiguration.RM_DELEGATION_TOKEN_MAX_LIFETIME_KEY, YarnConfiguration.RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT); long tokenRenewInterval = conf.getLong( YarnConfiguration.RM_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, YarnConfiguration.RM_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT); long removeScanInterval = conf.getTimeDuration( YarnConfiguration.RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_KEY, YarnConfiguration.RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); return new RouterDelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval, removeScanInterval, conf); }
3.68
flink_BinaryStringData_contains
/** * Returns true if and only if this BinaryStringData contains the specified sequence of bytes * values. * * @param s the sequence to search for * @return true if this BinaryStringData contains {@code s}, false otherwise */ public boolean contains(final BinaryStringData s) { ensureMaterialized(); s.ensureMaterialized(); if (s.binarySection.sizeInBytes == 0) { return true; } int find = BinarySegmentUtils.find( binarySection.segments, binarySection.offset, binarySection.sizeInBytes, s.binarySection.segments, s.binarySection.offset, s.binarySection.sizeInBytes); return find != -1; }
3.68
hbase_TaskMonitor_createStatus
/** * Create a monitored task for users to inquire about the status * @param description description of the status * @param ignore whether to skip tracking (e.g. show/clear/expire) the task in the * {@link TaskMonitor} * @param enableJournal enable when the task contains some stage journals * @return a monitored task */ public synchronized MonitoredTask createStatus(String description, boolean ignore, boolean enableJournal) { MonitoredTask stat = new MonitoredTaskImpl(enableJournal, description); MonitoredTask proxy = (MonitoredTask) Proxy.newProxyInstance(stat.getClass().getClassLoader(), new Class<?>[] { MonitoredTask.class }, new PassthroughInvocationHandler<>(stat)); TaskAndWeakRefPair pair = new TaskAndWeakRefPair(stat, proxy); if (tasks.isFull()) { purgeExpiredTasks(); } if (!ignore) { tasks.add(pair); } return proxy; }
3.68
flink_CheckpointedPosition_getOffset
/** Gets the offset that the reader will seek to when restored from this checkpoint. */ public long getOffset() { return offset; }
3.68
flink_StateSerializerProvider_fromNewRegisteredSerializer
/** * Creates a {@link StateSerializerProvider} from the registered state serializer. * * <p>If the state is a restored one, and the previous serializer's snapshot is obtained later * on, it should be supplied via the {@link * #setPreviousSerializerSnapshotForRestoredState(TypeSerializerSnapshot)} method. * * @param registeredStateSerializer the new state's registered serializer. * @param <T> the type of the state. * @return a new {@link StateSerializerProvider}. */ public static <T> StateSerializerProvider<T> fromNewRegisteredSerializer( TypeSerializer<T> registeredStateSerializer) { return new EagerlyRegisteredStateSerializerProvider<>(registeredStateSerializer); }
3.68
morf_SqlUtils_when
/** * Builder method for {@link WhenCondition}. * * <p> * Example: * </p> * * <pre> * <code> * caseStatement(when(eq(field("receiptType"), literal("R"))).then(literal("Receipt")), * when(eq(field("receiptType"), literal("S"))).then(literal("Agreement Suspense")), * when(eq(field("receiptType"), literal("T"))).then(literal("General Suspense"))) * .otherwise(literal("UNKNOWN")) * </code> * </pre> * * @see #caseStatement(WhenCondition...) * * @param criterion Criteria * @return A builder to create a {@link WhenCondition}. */ public static WhenConditionBuilder when(Criterion criterion) { return new WhenConditionBuilder(criterion); }
3.68
framework_AbstractJavaScriptRenderer_callFunction
/** * Invoke a named function that the connector JavaScript has added to the * JavaScript connector wrapper object. The arguments can be any boxed * primitive type, String, {@link JsonValue} or arrays of any other * supported type. Complex types (e.g. List, Set, Map, Connector or any * JavaBean type) must be explicitly serialized to a {@link JsonValue} * before sending. This can be done either with * {@link JsonCodec#encode(Object, JsonValue, java.lang.reflect.Type, com.vaadin.ui.ConnectorTracker)} * or using the factory methods in {@link Json}. * * @param name * the name of the function * @param arguments * function arguments */ protected void callFunction(String name, Object... arguments) { callbackHelper.invokeCallback(name, arguments); }
3.68
flink_HsMemoryDataManager_registerNewConsumer
/** * Register {@link HsSubpartitionConsumerInternalOperations} to {@link * #subpartitionViewOperationsMap}. It is used to obtain the consumption progress of the * subpartition. */ public HsDataView registerNewConsumer( int subpartitionId, HsConsumerId consumerId, HsSubpartitionConsumerInternalOperations viewOperations) { HsSubpartitionConsumerInternalOperations oldView = subpartitionViewOperationsMap.get(subpartitionId).put(consumerId, viewOperations); Preconditions.checkState( oldView == null, "Each subpartition view should have unique consumerId."); return getSubpartitionMemoryDataManager(subpartitionId).registerNewConsumer(consumerId); }
3.68
pulsar_ManagedLedgerConfig_getMetadataMaxEntriesPerLedger
/** * @return the metadataMaxEntriesPerLedger */ public int getMetadataMaxEntriesPerLedger() { return metadataMaxEntriesPerLedger; }
3.68
zxing_URIResultParser_isPossiblyMaliciousURI
/** * @return true if the URI contains suspicious patterns that may suggest it intends to * mislead the user about its true nature. At the moment this looks for the presence * of user/password syntax in the host/authority portion of a URI which may be used * in attempts to make the URI's host appear to be other than it is. Example: * http://yourbank.com@phisher.com This URI connects to phisher.com but may appear * to connect to yourbank.com at first glance. */ static boolean isPossiblyMaliciousURI(String uri) { return !ALLOWED_URI_CHARS_PATTERN.matcher(uri).matches() || USER_IN_HOST.matcher(uri).find(); }
3.68
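The userinfo trick described above can be demonstrated with the JDK's own URI parser (a sketch; ZXing's ALLOWED_URI_CHARS_PATTERN and USER_IN_HOST patterns are not reproduced here):

import java.net.URI;

public class UserInfoPhishingDemo {
    public static void main(String[] args) {
        URI uri = URI.create("http://yourbank.com@phisher.com/login");
        System.out.println(uri.getHost());     // phisher.com  <- where it really connects
        System.out.println(uri.getUserInfo()); // yourbank.com <- the misleading prefix
    }
}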
pulsar_ManagedCursor_scan
/** * Scan the cursor from the current position up to the end. * Please note that this is an expensive operation * @param startingPosition the position to start from, if not provided the scan will start from * the lastDeleteMarkPosition * @param condition a condition to continue the scan, the condition can access the entry * @param batchSize number of entries to process at each read * @param maxEntries maximum number of entries to scan * @param timeOutMs maximum time to spend on this operation * @throws InterruptedException * @throws ManagedLedgerException */ default CompletableFuture<ScanOutcome> scan(Optional<Position> startingPosition, Predicate<Entry> condition, int batchSize, long maxEntries, long timeOutMs) { return CompletableFuture.failedFuture(new UnsupportedOperationException()); }
3.68
streampipes_TokenizerProcessor_declareModel
// TODO: Maybe change outputStrategy to an array instead of tons of different strings
@Override
public DataProcessorDescription declareModel() {
  return ProcessingElementBuilder.create("org.apache.streampipes.processors.textmining.jvm.tokenizer")
      .category(DataProcessorType.ENRICH_TEXT)
      .withAssets(Assets.DOCUMENTATION, Assets.ICON)
      .withLocales(Locales.EN)
      .requiredFile(Labels.withId(BINARY_FILE_KEY))
      .requiredStream(StreamRequirementsBuilder
          .create()
          .requiredPropertyWithUnaryMapping(
              EpRequirements.stringReq(),
              Labels.withId(DETECTION_FIELD_KEY),
              PropertyScope.NONE)
          .build())
      .outputStrategy(OutputStrategies.append(EpProperties.listStringEp(Labels.withId(TOKEN_LIST_FIELD_KEY),
          TOKEN_LIST_FIELD_KEY, "http://schema.org/ItemList")))
      .build();
}
3.68
framework_Tree_removeListener
/** * @deprecated As of 7.0, replaced by * {@link #removeItemClickListener(ItemClickListener)} */ @Override @Deprecated public void removeListener(ItemClickListener listener) { removeItemClickListener(listener); }
3.68
hadoop_BaseTableRW_getTableName
/** * Get the table name based on the input config parameters. * * @param conf HBase configuration from which table name will be fetched. * @param tableNameInConf the table name parameter in conf. * @param defaultTableName the default table name. * @return A {@link TableName} object. */ public static TableName getTableName(Configuration conf, String tableNameInConf, String defaultTableName) { String tableName = conf.get(tableNameInConf, defaultTableName); return getTableName(conf, tableName); }
3.68
hbase_OrderedBytes_getVaruint64
/** * Decode a sequence of bytes in {@code src} as a varuint64. Complement the encoded value when * {@code comp} is true. * @return the decoded value. */ static long getVaruint64(PositionedByteRange src, boolean comp) { assert src.getRemaining() >= lengthVaruint64(src, comp); final long ret; Order ord = comp ? DESCENDING : ASCENDING; byte x = src.get(); final int a0 = ord.apply(x) & 0xff, a1, a2, a3, a4, a5, a6, a7, a8; if (-1 == unsignedCmp(a0, 241)) { return a0; } x = src.get(); a1 = ord.apply(x) & 0xff; if (-1 == unsignedCmp(a0, 249)) { return (a0 - 241L) * 256 + a1 + 240; } x = src.get(); a2 = ord.apply(x) & 0xff; if (a0 == 249) { return 2288L + 256 * a1 + a2; } x = src.get(); a3 = ord.apply(x) & 0xff; if (a0 == 250) { return ((long) a1 << 16L) | (a2 << 8) | a3; } x = src.get(); a4 = ord.apply(x) & 0xff; ret = (((long) a1) << 24) | (a2 << 16) | (a3 << 8) | a4; if (a0 == 251) { return ret; } x = src.get(); a5 = ord.apply(x) & 0xff; if (a0 == 252) { return (ret << 8) | a5; } x = src.get(); a6 = ord.apply(x) & 0xff; if (a0 == 253) { return (ret << 16) | (a5 << 8) | a6; } x = src.get(); a7 = ord.apply(x) & 0xff; if (a0 == 254) { return (ret << 24) | (a5 << 16) | (a6 << 8) | a7; } x = src.get(); a8 = ord.apply(x) & 0xff; return (ret << 32) | (((long) a5) << 24) | (a6 << 16) | (a7 << 8) | a8; }
3.68
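The first byte selects the tier: values up to 240 occupy one byte, 241 to 2287 two bytes, 2288 to 67823 three bytes, and larger values switch to big-endian payloads. A self-contained sketch of the short tiers, without the ascending/descending complement handling:

// Decodes the one-, two- and three-byte tiers of the encoding read by
// getVaruint64 above (ascending order only; longer tiers omitted).
public class VaruintDemo {
    static long decodeShortTiers(byte[] src) {
        int a0 = src[0] & 0xff;
        if (a0 <= 240) {
            return a0;                              // values 0..240
        }
        int a1 = src[1] & 0xff;
        if (a0 <= 248) {
            return (a0 - 241L) * 256 + a1 + 240;    // values 241..2287
        }
        int a2 = src[2] & 0xff;
        if (a0 == 249) {
            return 2288L + 256 * a1 + a2;           // values 2288..67823
        }
        throw new UnsupportedOperationException("tiers for a0 >= 250 omitted");
    }

    public static void main(String[] args) {
        System.out.println(decodeShortTiers(new byte[] {(byte) 200}));       // 200
        System.out.println(decodeShortTiers(new byte[] {(byte) 241, 1}));    // 241
        System.out.println(decodeShortTiers(new byte[] {(byte) 249, 0, 0})); // 2288
    }
}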
framework_ApplicationConfiguration_getThemeName
/** * @return the theme name used when initializing the application * @deprecated as of 7.3. Use {@link UIConnector#getActiveTheme()} to get * the theme currently in use */ @Deprecated public String getThemeName() { return getJsoConfiguration(id).getConfigString("theme"); }
3.68
hadoop_GlobalPolicy_registerPaths
/** * Return a map of the object type and RM path to request it from - the * framework will query these paths and provide the objects to the policy. * Delegating this responsibility to the PolicyGenerator enables us to avoid * duplicate calls to the same * endpoints as the GlobalPolicy is invoked * once per queue. * * @return a map of the object type and RM path. */ protected Map<Class<?>, String> registerPaths() { // Default register nothing return Collections.emptyMap(); }
3.68
hbase_RegionServerObserver_postExecuteProcedures
/** * This will be called after executing procedures * @param ctx the environment to interact with the framework and region server. */ default void postExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException { }
3.68
shardingsphere-elasticjob_JobConfiguration_jobExecutorThreadPoolSizeProviderType
/** * Set job executor thread pool size provider type. * * @param jobExecutorThreadPoolSizeProviderType job executor thread pool size provider type * @return job configuration builder */ public Builder jobExecutorThreadPoolSizeProviderType(final String jobExecutorThreadPoolSizeProviderType) { this.jobExecutorThreadPoolSizeProviderType = jobExecutorThreadPoolSizeProviderType; return this; }
3.68
hudi_HoodieTableMetadataUtil_coerceToComparable
/**
 * Given a schema, coerces provided value to instance of {@link Comparable<?>} such that
 * it could subsequently be used in column stats
 *
 * NOTE: This method has to stay compatible with the semantic of
 * {@link ParquetUtils#readRangeFromParquetMetadata} as they are used in tandem
 */
private static Comparable<?> coerceToComparable(Schema schema, Object val) {
  if (val == null) {
    return null;
  }

  switch (schema.getType()) {
    case UNION:
      // TODO we need to handle unions in general case as well
      return coerceToComparable(resolveNullableSchema(schema), val);

    case FIXED:
    case BYTES:
      if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
        return (Comparable<?>) val;
      }
      return (ByteBuffer) val;

    case INT:
      if (schema.getLogicalType() == LogicalTypes.date()
          || schema.getLogicalType() == LogicalTypes.timeMillis()) {
        // NOTE: This type will be either {@code java.sql.Date} or {org.joda.LocalDate}
        //       depending on the Avro version. Hence, we simply cast it to {@code Comparable<?>}
        return (Comparable<?>) val;
      }
      return (Integer) val;

    case LONG:
      if (schema.getLogicalType() == LogicalTypes.timeMicros()
          || schema.getLogicalType() == LogicalTypes.timestampMicros()
          || schema.getLogicalType() == LogicalTypes.timestampMillis()) {
        // NOTE: This type will be either {@code java.sql.Date} or {org.joda.LocalDate}
        //       depending on the Avro version. Hence, we simply cast it to {@code Comparable<?>}
        return (Comparable<?>) val;
      }
      return (Long) val;

    case STRING:
      // unpack the avro Utf8 if possible
      return val.toString();

    case FLOAT:
    case DOUBLE:
    case BOOLEAN:
      return (Comparable<?>) val;

    // TODO add support for those types
    case ENUM:
    case MAP:
    case NULL:
    case RECORD:
    case ARRAY:
      return null;

    default:
      throw new IllegalStateException("Unexpected type: " + schema.getType());
  }
}
3.68
framework_VLoadingIndicator_setFirstDelay
/** * Sets the delay (in ms) which must pass before the loading indicator moves * into the "first" state and is shown to the user. * * @param firstDelay * The delay (in ms) until moving into the "first" state. Counted * from when {@link #trigger()} is called. */ public void setFirstDelay(int firstDelay) { this.firstDelay = firstDelay; }
3.68
pulsar_AuthenticationFactoryOAuth2_clientCredentials
/** * Authenticate with client credentials. * * @param issuerUrl the issuer URL * @param credentialsUrl the credentials URL * @param audience An optional field. The audience identifier used by some Identity Providers, like Auth0. * @param scope An optional field. The value of the scope parameter is expressed as a list of space-delimited, * case-sensitive strings. The strings are defined by the authorization server. * If the value contains multiple space-delimited strings, their order does not matter, * and each string adds an additional access range to the requested scope. * From here: https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.2 * @return an Authentication object */ public static Authentication clientCredentials(URL issuerUrl, URL credentialsUrl, String audience, String scope) { ClientCredentialsFlow flow = ClientCredentialsFlow.builder() .issuerUrl(issuerUrl) .privateKey(credentialsUrl.toExternalForm()) .audience(audience) .scope(scope) .build(); return new AuthenticationOAuth2(flow, Clock.systemDefaultZone()); }
3.68
rocketmq-connect_AbstractLocalSchemaRegistryClient_compareAndGet
/**
 * Compare and get.
 *
 * @param namespace the schema namespace
 * @param subject the subject
 * @param schemaName the schema name
 * @param request the register request
 * @param schemaRecordAllVersion all versions of the schema record
 * @param schema the parsed schema to match
 * @return the schema response
 */
protected SchemaResponse compareAndGet(String namespace, String subject, String schemaName,
    RegisterSchemaRequest request, List<SchemaRecordDto> schemaRecordAllVersion, ParsedSchema schema) {
  SchemaRecordDto matchSchemaRecord = compareAndGet(schemaRecordAllVersion, schemaName, schema);
  if (matchSchemaRecord != null) {
    GetSchemaResponse getSchemaResponse = new GetSchemaResponse();
    getSchemaResponse.setRecordId(matchSchemaRecord.getRecordId());
    return SchemaResponse.builder()
        .subjectName(getSchemaResponse.getSubjectFullName())
        .schemaName(getSchemaResponse.getSchemaFullName())
        .recordId(getSchemaResponse.getRecordId())
        .idl(request.getSchemaIdl())
        .build();
  }
  // match is null, update the schema to create a new version
  UpdateSchemaRequest updateSchemaRequest = UpdateSchemaRequest.builder()
      .schemaIdl(request.getSchemaIdl())
      .desc(request.getDesc())
      .owner(request.getOwner())
      .build();
  try {
    UpdateSchemaResponse updateSchemaResponse =
        schemaRegistryClient.updateSchema(cluster, namespace, subject, schemaName, updateSchemaRequest);
    GetSchemaResponse getSchemaResponse = new GetSchemaResponse();
    getSchemaResponse.setRecordId(updateSchemaResponse.getRecordId());
    return SchemaResponse.builder()
        .subjectName(subject)
        .schemaName(schemaName)
        .recordId(updateSchemaResponse.getRecordId())
        .build();
  } catch (RestClientException | IOException e) {
    throw new RuntimeException(e);
  }
}
3.68
framework_AnimationUtil_setAnimationDelay
/** * For internal use only. May be removed or replaced in the future. * * Set the animation-delay CSS property. * * @param elem * the element whose animation-delay to set * @param delay * the delay as a valid CSS value */ public static void setAnimationDelay(Element elem, String delay) { Style style = elem.getStyle(); style.setProperty(ANIMATION_PROPERTY_NAME + "Delay", delay); }
3.68
flink_TaskStateStats_getJobVertexId
/** @return ID of the operator the statistics belong to. */ public JobVertexID getJobVertexId() { return jobVertexId; }
3.68
zxing_BinaryBitmap_rotateCounterClockwise
/** * Returns a new object with rotated image data by 90 degrees counterclockwise. * Only callable if {@link #isRotateSupported()} is true. * * @return A rotated version of this object. */ public BinaryBitmap rotateCounterClockwise() { LuminanceSource newSource = binarizer.getLuminanceSource().rotateCounterClockwise(); return new BinaryBitmap(binarizer.createBinarizer(newSource)); }
3.68
graphhopper_Helper_createFormatter
/** * Creates a SimpleDateFormat with ENGLISH locale. */ public static DateFormat createFormatter(String str) { DateFormat df = new SimpleDateFormat(str, Locale.ENGLISH); df.setTimeZone(UTC); return df; }
3.68
framework_Table_isPartialRowUpdate
/** * Subclass and override this to enable partial row updates and additions, * which bypass the normal caching mechanism. This is useful for e.g. * TreeTable. * * @return true if this update is a partial row update, false if not. For * plain Table it is always false. */ protected boolean isPartialRowUpdate() { return false; }
3.68
graphhopper_VectorTile_setLayers
/** * <code>repeated .vector_tile.Tile.Layer layers = 3;</code> */ public Builder setLayers( int index, vector_tile.VectorTile.Tile.Layer.Builder builderForValue) { if (layersBuilder_ == null) { ensureLayersIsMutable(); layers_.set(index, builderForValue.build()); onChanged(); } else { layersBuilder_.setMessage(index, builderForValue.build()); } return this; }
3.68
hadoop_AHSWebServices_getLogs
// TODO: YARN-4993: Refactor ContainersLogsBlock, AggregatedLogsBlock and
// container log webservice introduced in AHS to minimize
// the duplication.
@GET
@Path("/containerlogs/{containerid}/{filename}")
@Produces({ MediaType.TEXT_PLAIN + "; " + JettyUtils.UTF_8 })
@Public
@Unstable
public Response getLogs(@Context HttpServletRequest req,
    @Context HttpServletResponse res,
    @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr,
    @PathParam(YarnWebServiceParams.CONTAINER_LOG_FILE_NAME) String filename,
    @QueryParam(YarnWebServiceParams.RESPONSE_CONTENT_FORMAT) String format,
    @QueryParam(YarnWebServiceParams.RESPONSE_CONTENT_SIZE) String size,
    @QueryParam(YarnWebServiceParams.NM_ID) String nmId,
    @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
    @DefaultValue("false") boolean redirectedFromNode,
    @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION)
    @DefaultValue("false") boolean manualRedirection) {
  initForReadableEndpoints(res);
  return logServlet.getLogFile(req, containerIdStr, filename, format, size,
      nmId, redirectedFromNode, null, manualRedirection);
}
3.68
hmily_QuoteCharacter_wrap
/** * Wrap value with quote character. * * @param value value to be wrapped * @return wrapped value */ public String wrap(final String value) { return String.format("%s%s%s", startDelimiter, value, endDelimiter); }
3.68