Columns:
name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (3.26 to 3.68)
hadoop_BlockBlobAppendStream_hasCapability
/** * The Synchronization capabilities of this stream depend upon the compaction * policy. * @param capability string to query the stream support for. * @return true for hsync and hflush when compaction is enabled. */ @Override public boolean hasCapability(String capability) { if (!compactionEnabled) { return false; } return StoreImplementationUtils.isProbeForSyncable(capability); }
3.68
framework_AbstractRemoteDataSource_indexOf
/** * Retrieves the index for given row object. * <p> * <em>Note:</em> This method does not verify that the given row object * exists at all in this DataSource. * * @param row * the row object * @return index of the row; or <code>-1</code> if row is not available */ public int indexOf(T row) { Object key = getRowKey(row); if (keyToIndexMap.containsKey(key)) { return keyToIndexMap.get(key); } return -1; }
3.68
pulsar_RateLimiter_acquire
/** * Acquires the given number of permits from this {@code RateLimiter}, blocking until the request is granted. * * @param acquirePermit * the number of permits to acquire */ public synchronized void acquire(long acquirePermit) throws InterruptedException { checkArgument(!isClosed(), "Rate limiter is already shutdown"); checkArgument(acquirePermit <= this.permits, "acquiring permits must be less than or equal to the initialized rate =" + this.permits); // lazy init and start the task only once the application starts using it if (renewTask == null) { renewTask = createTask(); } boolean canAcquire = false; do { canAcquire = acquirePermit < 0 || acquiredPermits < this.permits; if (!canAcquire) { wait(); } else { acquiredPermits += acquirePermit; } } while (!canAcquire); }
3.68
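A minimal usage sketch for the blocking acquire above. The RateLimiter constructor signature, Message, batch, and producer are assumptions for illustration; only acquire() itself appears in the snippet.

// Hypothetical throttling loop: at most `permits` sends per refill period.
// The constructor arguments below are an assumption, not shown in the snippet.
RateLimiter limiter = new RateLimiter(100 /* permits */, 1, TimeUnit.SECONDS);
for (Message msg : batch) {
    limiter.acquire(1); // blocks in wait() until the renew task refills permits
    producer.send(msg);
}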
hadoop_FlowRunCoprocessor_preGetOp
/* * (non-Javadoc) * * Creates a {@link FlowScanner} Scan so that it can correctly process the * contents of {@link FlowRunTable}. * * @see * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#preGetOp(org.apache * .hadoop.hbase.coprocessor.ObserverContext, * org.apache.hadoop.hbase.client.Get, java.util.List) */ @Override public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> results) throws IOException { Scan scan = new Scan(get); scan.setMaxVersions(); RegionScanner scanner = null; try { scanner = new FlowScanner(e.getEnvironment(), scan, region.getScanner(scan), FlowScannerOperation.READ); scanner.next(results); e.bypass(); } finally { if (scanner != null) { scanner.close(); } } }
3.68
hbase_ZKDump_getServerStats
/** * Gets the statistics from the given server. * @param server The server to get the statistics from. * @param timeout The socket timeout to use. * @return The array of response strings. * @throws IOException When the socket communication fails. */ private static String[] getServerStats(String server, int timeout) throws IOException { String[] sp = server.split(":"); if (sp.length == 0) { return null; } String host = sp[0]; int port = sp.length > 1 ? Integer.parseInt(sp[1]) : HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT; try (Socket socket = new Socket()) { InetSocketAddress sockAddr = new InetSocketAddress(host, port); if (sockAddr.isUnresolved()) { throw new UnknownHostException(host + " cannot be resolved"); } socket.connect(sockAddr, timeout); socket.setSoTimeout(timeout); try ( PrintWriter out = new PrintWriter(new BufferedWriter( new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)), true); BufferedReader in = new BufferedReader( new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) { out.println("stat"); out.flush(); ArrayList<String> res = new ArrayList<>(); while (true) { String line = in.readLine(); if (line != null) { res.add(line); } else { break; } } return res.toArray(new String[res.size()]); } } }
3.68
hudi_KafkaConnectConfigs_withProperties
// Kafka Connect tasks are passed props of type Map<?, ?> public Builder withProperties(Map<?, ?> properties) { connectConfigs.getProps().putAll(properties); return this; }
3.68
flink_HiveParserIntervalDayTime_getDouble
/** @return double representation of the interval day time, accurate to nanoseconds */ public double getDouble() { return totalSeconds + nanos / 1_000_000_000.0; }
3.68
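The divisor fix above is load-bearing: with the original integer divisor, nanos / 1000000000 truncates to 0 for any sub-second remainder, contradicting the "accurate to nanoseconds" contract. A standalone check:

public class IntervalDoubleDemo {
    public static void main(String[] args) {
        long totalSeconds = 5;
        long nanos = 500_000_000; // half a second
        System.out.println(totalSeconds + nanos / 1_000_000_000);   // 5, fraction lost to integer division
        System.out.println(totalSeconds + nanos / 1_000_000_000.0); // 5.5, as documented
    }
}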
framework_Table_paintRowAttributes
/** * A method where extended Table implementations may add their custom * attributes for rows. * * @param target the paint target to write the attributes to * @param itemId the item id of the row being painted */ protected void paintRowAttributes(PaintTarget target, Object itemId) throws PaintException { }
3.68
morf_CompositeSchema_views
/** * @see org.alfasoftware.morf.metadata.Schema#views() */ @Override public Collection<View> views() { Set<View> result = Sets.newHashSet(); Set<String> seenViews = Sets.newHashSet(); for (Schema schema : delegates) { for (View view : schema.views()) { if (seenViews.add(view.getName().toUpperCase())) { result.add(view); } } } return result; }
3.68
hbase_RateLimiter_set
/** * Set the RateLimiter max available resources and refill period. * @param limit The max value available resource units can be refilled to. * @param timeUnit Timeunit factor for translating to ms. */ public synchronized void set(final long limit, final TimeUnit timeUnit) { switch (timeUnit) { case MILLISECONDS: tunit = 1; break; case SECONDS: tunit = 1000; break; case MINUTES: tunit = 60 * 1000; break; case HOURS: tunit = 60 * 60 * 1000; break; case DAYS: tunit = 24 * 60 * 60 * 1000; break; default: throw new RuntimeException("Unsupported " + timeUnit.name() + " TimeUnit."); } this.limit = limit; this.avail = limit; }
3.68
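The switch above hand-writes the millisecond factor for each unit. A sketch of an equivalent helper that delegates the arithmetic to TimeUnit itself (an illustration, not the HBase implementation):

import java.util.concurrent.TimeUnit;

// Same conversion via TimeUnit; rejects units finer than a millisecond,
// for which toMillis(1) returns 0.
static long millisPerUnit(TimeUnit timeUnit) {
    long ms = timeUnit.toMillis(1); // e.g. SECONDS -> 1000, DAYS -> 86400000
    if (ms == 0) {
        throw new RuntimeException("Unsupported " + timeUnit.name() + " TimeUnit.");
    }
    return ms;
}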
morf_H2Dialect_getSqlForNow
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForNow(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForNow(Function function) { return "CURRENT_TIMESTAMP()"; }
3.68
hudi_HiveSchemaUtil_convertParquetSchemaToHiveFieldSchema
/** * Returns the equivalent Hive table field schema read from a parquet file. * * @param messageType Parquet schema * @param syncConfig Hive sync configuration * @return Hive table schema read from the parquet file, as List[FieldSchema] without the partition field */ public static List<FieldSchema> convertParquetSchemaToHiveFieldSchema(MessageType messageType, HiveSyncConfig syncConfig) throws IOException { return convertMapSchemaToHiveFieldSchema(parquetSchemaToMapSchema(messageType, syncConfig.getBoolean(HIVE_SUPPORT_TIMESTAMP_TYPE), false), syncConfig); }
3.68
hadoop_RouterFedBalance_setForceCloseOpen
/** * Whether to force close all open files when there is no diff. * @param value true to force close all the open files. */ public Builder setForceCloseOpen(boolean value) { this.forceCloseOpen = value; return this; }
3.68
morf_AbstractSqlDialectTest_testSelectWithConcatenationUsingFunction
/** * Tests concatenation in a select with {@linkplain Function}. */ @Test public void testSelectWithConcatenationUsingFunction() { SelectStatement stmt = new SelectStatement(new ConcatenatedField(new FieldReference("assetDescriptionLine1"), max(new FieldReference("scheduleStartDate"))).as("test")).from(new TableReference("schedule")); String result = testDialect.convertStatementToSQL(stmt); assertEquals("Select script should match expected", expectedConcatenationWithFunction(), result); }
3.68
hbase_LogLevel_process
/** * Configures the client to send HTTP request to the URL. Supports SPNEGO for authentication. * @param urlString URL and query string to the daemon's web UI * @throws Exception if unable to connect */ private void process(String urlString) throws Exception { URL url = new URL(urlString); System.out.println("Connecting to " + url); HttpURLConnection connection = connect(url); HttpExceptionUtils.validateResponse(connection, 200); // read from the servlet try ( InputStreamReader streamReader = new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); BufferedReader bufferedReader = new BufferedReader(streamReader)) { bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); } catch (IOException ioe) { System.err.println("" + ioe); } }
3.68
hadoop_IOStatisticsContextIntegration_enableIOStatisticsContext
/** * Enables IOStatisticsContext for tests, overriding whatever is otherwise * set in the configuration. */ @VisibleForTesting public static void enableIOStatisticsContext() { if (!isThreadIOStatsEnabled) { LOG.info("Enabling Thread IOStatistics.."); isThreadIOStatsEnabled = true; } }
3.68
hbase_GetUserPermissionsRequest_withUserName
/** * User name may be null if all global/namespace/table permissions are needed. */ public Builder withUserName(String userName) { this.userName = userName; return this; }
3.68
flink_CompactingHashTable_getPartitionSize
/** * Size of all memory segments owned by the partitions of this hash table excluding the * compaction partition * * @return size in bytes */ private long getPartitionSize() { long numSegments = 0; for (InMemoryPartition<T> p : this.partitions) { numSegments += p.getBlockCount(); } return numSegments * this.segmentSize; }
3.68
flink_BinaryIndexedSortable_writeIndexAndNormalizedKey
/** Write of index and normalizedKey. */ protected void writeIndexAndNormalizedKey(RowData record, long currOffset) { // add the pointer and the normalized key this.currentSortIndexSegment.putLong(this.currentSortIndexOffset, currOffset); if (this.numKeyBytes != 0) { normalizedKeyComputer.putKey( record, this.currentSortIndexSegment, this.currentSortIndexOffset + OFFSET_LEN); } this.currentSortIndexOffset += this.indexEntrySize; this.numRecords++; }
3.68
hbase_SnapshotManager_updateWorkingDirAclsIfRequired
/** * If the parent dir of the snapshot working dir (e.g. /hbase/.hbase-snapshot) has non-empty ACLs, * use them for the current working dir (e.g. /hbase/.hbase-snapshot/.tmp/{snapshot-name}) so that * regardless of whether the snapshot commit phase performs atomic rename or non-atomic copy of * the working dir to new snapshot dir, the ACLs are retained. * @param workingDir working dir to build the snapshot. * @param workingDirFS working dir file system. * @throws IOException If ACL read/modify operation fails. */ private static void updateWorkingDirAclsIfRequired(Path workingDir, FileSystem workingDirFS) throws IOException { if ( !workingDirFS.hasPathCapability(workingDir, CommonPathCapabilities.FS_ACLS) || workingDir.getParent() == null || workingDir.getParent().getParent() == null ) { return; } AclStatus snapshotWorkingParentDirStatus; try { snapshotWorkingParentDirStatus = workingDirFS.getAclStatus(workingDir.getParent().getParent()); } catch (IOException e) { LOG.warn("Unable to retrieve ACL status for path: {}, current working dir path: {}", workingDir.getParent().getParent(), workingDir, e); return; } List<AclEntry> snapshotWorkingParentDirAclStatusEntries = snapshotWorkingParentDirStatus.getEntries(); if ( snapshotWorkingParentDirAclStatusEntries != null && snapshotWorkingParentDirAclStatusEntries.size() > 0 ) { workingDirFS.modifyAclEntries(workingDir, snapshotWorkingParentDirAclStatusEntries); } }
3.68
framework_AbstractSplitPanel_isLocked
/** * Is the SplitPanel handle locked (user not allowed to change split * position by dragging). * * @return <code>true</code> if locked, <code>false</code> otherwise. */ public boolean isLocked() { return getSplitterState(false).locked; }
3.68
framework_AbstractColorPickerConnector_getCaption
/** * Get caption for the color picker widget. * * @return the caption */ protected String getCaption() { if (getState().showDefaultCaption && (getState().caption == null || getState().caption.isEmpty())) { return getState().color; } return getState().caption; }
3.68
flink_JoinTypeUtil_getFlinkJoinType
/** Converts {@link JoinRelType} to {@link FlinkJoinType}. */ public static FlinkJoinType getFlinkJoinType(JoinRelType joinRelType) { switch (joinRelType) { case INNER: return FlinkJoinType.INNER; case LEFT: return FlinkJoinType.LEFT; case RIGHT: return FlinkJoinType.RIGHT; case FULL: return FlinkJoinType.FULL; case SEMI: return FlinkJoinType.SEMI; case ANTI: return FlinkJoinType.ANTI; default: throw new IllegalArgumentException("invalid: " + joinRelType); } }
3.68
dubbo_ReflectUtils_findMethodByMethodName
/** * @param clazz Target class to find the method in * @param methodName Method signature, e.g.: method1(int, String). It is also allowed to provide the method name only, e.g.: method2 * @return target method * @throws NoSuchMethodException if no matching method is found * @throws ClassNotFoundException if a parameter type cannot be resolved * @throws IllegalStateException when multiple methods are found (overloaded methods when parameter info is not provided) * @deprecated use {@link MethodUtils#findMethod(Class, String, Class[])} instead */ @Deprecated public static Method findMethodByMethodName(Class<?> clazz, String methodName) throws NoSuchMethodException, ClassNotFoundException { return findMethodByMethodSignature(clazz, methodName, null); }
3.68
framework_BasicEventMoveHandler_eventMove
/* * (non-Javadoc) * * @see * com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventMoveHandler * #eventMove * (com.vaadin.addon.calendar.ui.CalendarComponentEvents.MoveEvent) */ @Override public void eventMove(MoveEvent event) { CalendarEvent calendarEvent = event.getCalendarEvent(); if (calendarEvent instanceof EditableCalendarEvent) { EditableCalendarEvent editableEvent = (EditableCalendarEvent) calendarEvent; Date newFromTime = event.getNewStart(); // Update event dates long length = editableEvent.getEnd().getTime() - editableEvent.getStart().getTime(); setDates(editableEvent, newFromTime, new Date(newFromTime.getTime() + length)); } }
3.68
zilla_HpackContext_staticIndex7
// Index in static table for the given name of length 7 private static int staticIndex7(DirectBuffer name) { switch (name.getByte(6)) { case 'd': if (STATIC_TABLE[2].name.equals(name)) // :method { return 2; } break; case 'e': if (STATIC_TABLE[6].name.equals(name)) // :scheme { return 6; } break; case 'h': if (STATIC_TABLE[52].name.equals(name)) // refresh { return 52; } break; case 'r': if (STATIC_TABLE[51].name.equals(name)) // referer { return 51; } break; case 's': if (STATIC_TABLE[8].name.equals(name)) // :status { return 8; } if (STATIC_TABLE[36].name.equals(name)) // expires { return 36; } break; } return -1; }
3.68
flink_HsSubpartitionConsumerMemoryDataManager_peekNextToConsumeDataType
/** * Check whether the head of {@link #unConsumedBuffers} is the buffer to be consumed next time. * If so, return the next buffer's data type. * * @param nextToConsumeIndex index of the buffer to be consumed next time. * @param buffersToRecycle buffers to recycle if needed. * @return If the head of {@link #unConsumedBuffers} is the target, return the buffer's data type. * Otherwise, return {@link Buffer.DataType#NONE}. */ @SuppressWarnings("FieldAccessNotGuarded") // Note: callWithLock ensures the code block is guarded by both resultPartitionReadLock and // consumerLock. @Override public Buffer.DataType peekNextToConsumeDataType( int nextToConsumeIndex, Collection<Buffer> buffersToRecycle) { return callWithLock(() -> peekNextToConsumeDataTypeInternal(nextToConsumeIndex)); }
3.68
flink_BinarySegmentUtils_bitSet
/** * Sets the bit at the given index in the segments. * * @param segments target segments. * @param baseOffset bits base offset. * @param index bit index from base offset. */ public static void bitSet(MemorySegment[] segments, int baseOffset, int index) { if (segments.length == 1) { int offset = baseOffset + byteIndex(index); MemorySegment segment = segments[0]; byte current = segment.get(offset); current |= (1 << (index & BIT_BYTE_INDEX_MASK)); segment.put(offset, current); } else { bitSetMultiSegments(segments, baseOffset, index); } }
3.68
hmily_HmilyTransactionHolder_cacheHmilyParticipant
/** * Cache hmily participant. * * @param hmilyParticipant the hmily participant */ public void cacheHmilyParticipant(final HmilyParticipant hmilyParticipant) { if (Objects.isNull(hmilyParticipant)) { return; } HmilyParticipantCacheManager.getInstance().cacheHmilyParticipant(hmilyParticipant); }
3.68
hmily_HmilyRoundRobinLoadBalance_select
/** * Use load balancing to select invoker. * * @param invokeContext invokeContext * @return Invoker * @throws NoInvokerException NoInvokerException */ @Override public Invoker<T> select(final InvokeContext invokeContext) throws NoInvokerException { List<Invoker<T>> staticWeightInvokers = staticWeightInvokersCache; if (staticWeightInvokers != null && !staticWeightInvokers.isEmpty()) { Invoker<T> invoker = staticWeightInvokers.get((staticWeightSequence.getAndIncrement() & Integer.MAX_VALUE) % staticWeightInvokers.size()); if (invoker.isAvailable()) { return invoker; } ServantInvokerAliveStat stat = ServantInvokerAliveChecker.get(invoker.getUrl()); if (stat.isAlive() || (stat.getLastRetryTime() + (config.getTryTimeInterval() * 1000)) < System.currentTimeMillis()) { LOGGER.info("try to use inactive invoker|" + invoker.getUrl().toIdentityString()); stat.setLastRetryTime(System.currentTimeMillis()); return invoker; } } List<Invoker<T>> sortedInvokers = sortedInvokersCache; if (CollectionUtils.isEmpty(sortedInvokers)) { throw new NoInvokerException("no such active connection invoker"); } List<Invoker<T>> list = new ArrayList<Invoker<T>>(); for (Invoker<T> invoker : sortedInvokers) { if (!invoker.isAvailable()) { ServantInvokerAliveStat stat = ServantInvokerAliveChecker.get(invoker.getUrl()); if (stat.isAlive() || (stat.getLastRetryTime() + (config.getTryTimeInterval() * 1000)) < System.currentTimeMillis()) { list.add(invoker); } } else { list.add(invoker); } } //TODO When all is not available. Whether to randomly extract one if (list.isEmpty()) { throw new NoInvokerException(config.getSimpleObjectName() + " try to select active invoker, size=" + sortedInvokers.size() + ", no such active connection invoker"); } Invoker<T> invoker = list.get((sequence.getAndIncrement() & Integer.MAX_VALUE) % list.size()); if (!invoker.isAvailable()) { LOGGER.info("try to use inactive invoker|" + invoker.getUrl().toIdentityString()); ServantInvokerAliveChecker.get(invoker.getUrl()).setLastRetryTime(System.currentTimeMillis()); } return HmilyLoadBalanceUtils.doSelect(invoker, sortedInvokersCache); }
3.68
hbase_Procedure_lockedWhenLoading
/** * Will only be called when loading procedures from procedure store, where we need to record * whether the procedure has already held a lock. Later we will call {@link #restoreLock(Object)} * to actually acquire the lock. */ final void lockedWhenLoading() { this.lockedWhenLoading = true; }
3.68
hadoop_DeletionTaskRecoveryInfo_getTask
/** * Return the recovered DeletionTask. * * @return the recovered DeletionTask. */ public DeletionTask getTask() { return task; }
3.68
hbase_ByteBufferUtils_searchDelimiterIndexInReverse
/** * Find the index of the passed delimiter, walking from the end of the buffer backwards. * @return Index of the delimiter, or -1 if not found */ public static int searchDelimiterIndexInReverse(ByteBuffer b, int offset, int length, int delimiter) { for (int i = offset + length - 1; i >= offset; i--) { if (b.get(i) == delimiter) { return i; } } return -1; }
3.68
framework_VFlash_setArchive
/** * This attribute may be used to specify a space-separated list of URIs for * archives containing resources relevant to the object, which may include * the resources specified by the classid and data attributes. Preloading * archives will generally result in reduced load times for objects. * Archives specified as relative URIs should be interpreted relative to the * codebase attribute. * * @param archive * Space-separated list of URIs with resources relevant to the * object */ public void setArchive(String archive) { if (!java.util.Objects.equals(this.archive, archive)) { this.archive = archive; needsRebuild = true; } }
3.68
hadoop_PartitionResourcesInfo_setUserAmLimit
/** * @param userAmLimit the userAmLimit to set */ public void setUserAmLimit(ResourceInfo userAmLimit) { this.userAmLimit = userAmLimit; }
3.68
flink_CommonTestUtils_createTempFile
/** * Creates a temporary file that contains the given string. The file is written with the * platform's default encoding. * * <p>The temp file is automatically deleted on JVM exit. * * @param contents The contents to be written to the file. * @return The temp file URI. */ public static String createTempFile(String contents) throws IOException { File f = File.createTempFile("flink_test_", ".tmp"); f.deleteOnExit(); try (BufferedWriter out = new BufferedWriter(new FileWriter(f))) { out.write(contents); } return f.toURI().toString(); }
3.68
framework_SelectorPath_generateFragment
/** * Generates a recursive ElementQuery for given path fragment * * @param fragment * Query fragment * @return ElementQuery java code as a String */ private String generateFragment(String fragment) { // Get Element.class -name String elementClass = getComponentName(fragment) + "Element.class"; String queryFragment = "$(" + elementClass + ")"; for (SelectorPredicate p : SelectorPredicate .extractPredicates(fragment)) { // Add in predicates like .caption and .id queryFragment += "." + p.getName() + "(\"" + p.getValue() + "\")"; } return queryFragment; }
3.68
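For orientation, the generated fragment is a chained TestBench-style element query. Assuming a Button fragment carrying a caption predicate (the exact fragment syntax and the trailing .first() call are assumptions here), the emitted code has this shape:

// Shape of the generated query for a hypothetical button fragment with
// a caption predicate: "$(" + elementClass + ")" plus one call per predicate.
ButtonElement ok = $(ButtonElement.class).caption("OK").first();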
hbase_StorageClusterStatusModel_addLiveNode
/** * Add a live node to the cluster representation. * @param name the region server name * @param startCode the region server's start code * @param heapSizeMB the current heap size, in MB * @param maxHeapSizeMB the maximum heap size, in MB */ public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) { Node node = new Node(name, startCode); node.setHeapSizeMB(heapSizeMB); node.setMaxHeapSizeMB(maxHeapSizeMB); liveNodes.add(node); return node; }
3.68
rocketmq-connect_WorkerDirectTask_execute
/** * Execute poll and send records. */ @Override protected void execute() { while (isRunning()) { updateCommittableOffsets(); if (shouldPause()) { onPause(); try { // wait until unpaused if (awaitUnpause()) { onResume(); } continue; } catch (InterruptedException e) { // interrupted while paused; loop around and re-check the running state } } try { Collection<ConnectRecord> toSendEntries = sourceTask.poll(); if (!toSendEntries.isEmpty()) { sendRecord(toSendEntries); } } catch (Exception e) { log.error("Direct task runtime exception", e); finalOffsetCommit(true); onFailure(e); } } }
3.68
hbase_BloomFilterUtil_checkBit
/** * Check if bit at specified index is 1. * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { int bytePos = pos >> 3; // pos / 8 int bitPos = pos & 0x7; // pos % 8 byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= bitvals[bitPos]; return (curByte != 0); }
3.68
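The index math above is the usual bit-addressing idiom: pos >> 3 selects the byte, pos & 0x7 the bit within it, and bitvals is presumably the eight single-bit masks. A self-contained version over a plain byte[] for verification:

public class BitCheckDemo {
    static final byte[] BITVALS = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, (byte) 0x80};

    static boolean checkBit(int pos, byte[] buf, int offset) {
        int bytePos = pos >> 3;  // pos / 8
        int bitPos = pos & 0x7;  // pos % 8
        return (buf[offset + bytePos] & BITVALS[bitPos]) != 0;
    }

    public static void main(String[] args) {
        byte[] bloom = {0b0000_0101}; // bits 0 and 2 set
        System.out.println(checkBit(0, bloom, 0)); // true
        System.out.println(checkBit(1, bloom, 0)); // false
        System.out.println(checkBit(2, bloom, 0)); // true
    }
}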
morf_Column_toStringHelper
/** * Helper for {@link Object#toString()} implementations. * * @return String representation of the column. */ public default String toStringHelper() { return new StringBuilder() .append("Column-").append(getName()) .append("-").append(getType()) .append("-").append(getType().hasWidth() ? getWidth() : "") .append("-").append(getType().hasScale() ? getScale() : "") .append("-").append(isNullable() ? "null" : "notNull") .append("-").append(isPrimaryKey() ? "pk" : "") .append("-").append(isAutoNumbered() ? "autonum" : "") .append("-").append(getAutoNumberStart()) .append("-").append(getDefaultValue()) .toString(); }
3.68
hadoop_DockerClientConfigHandler_readCredentialsFromConfigFile
/** * Read the Docker client configuration and extract the auth tokens into * Credentials. * * @param configFile the Path to the Docker client configuration. * @param conf the Configuration object, needed by the FileSystem. * @param applicationId the application ID to associate the Credentials with. * @return the populated Credential object with the Docker Tokens. * @throws IOException if the file can not be read. */ public static Credentials readCredentialsFromConfigFile(Path configFile, Configuration conf, String applicationId) throws IOException { // Read the config file String contents = null; configFile = new Path(configFile.toUri()); FileSystem fs = configFile.getFileSystem(conf); if (fs != null) { FSDataInputStream fileHandle = fs.open(configFile); if (fileHandle != null) { contents = IOUtils.toString(fileHandle, StandardCharsets.UTF_8); } } if (contents == null) { throw new IOException("Failed to read Docker client configuration: " + configFile); } // Parse the JSON and create the Tokens/Credentials. ObjectMapper mapper = new ObjectMapper(); JsonFactory factory = mapper.getFactory(); JsonParser parser = factory.createParser(contents); JsonNode rootNode = mapper.readTree(parser); Credentials credentials = new Credentials(); if (rootNode.has(CONFIG_AUTHS_KEY)) { Iterator<String> iter = rootNode.get(CONFIG_AUTHS_KEY).fieldNames(); for (; iter.hasNext();) { String registryUrl = iter.next(); String registryCred = rootNode.get(CONFIG_AUTHS_KEY) .get(registryUrl) .get(CONFIG_AUTH_KEY) .asText(); TokenIdentifier tokenId = new DockerCredentialTokenIdentifier(registryUrl, applicationId); Token<DockerCredentialTokenIdentifier> token = new Token<>(tokenId.getBytes(), registryCred.getBytes(StandardCharsets.UTF_8), tokenId.getKind(), new Text(registryUrl)); credentials.addToken( new Text(registryUrl + "-" + applicationId), token); LOG.info("Token read from Docker client configuration file: " + token.toString()); } } return credentials; }
3.68
framework_SQLContainer_getPage
/** * Fetches a page from the data source based on the values of pageLength and * currentOffset. Also updates the set of primary keys, used in * identification of RowItems. */ private void getPage() { updateCount(); ResultSet rs = null; ResultSetMetaData rsmd = null; cachedItems.clear(); itemIndexes.clear(); try { try { queryDelegate.setOrderBy(sorters); } catch (UnsupportedOperationException e) { /* The query delegate doesn't support sorting. */ /* No need to do anything. */ getLogger().log(Level.FINE, "The query delegate doesn't support sorting", e); } queryDelegate.beginTransaction(); int fetchedRows = pageLength * CACHE_RATIO + cacheOverlap; rs = queryDelegate.getResults(currentOffset, fetchedRows); rsmd = rs.getMetaData(); List<String> pKeys = queryDelegate.getPrimaryKeyColumns(); /* Create new items and column properties */ ColumnProperty cp = null; int rowCount = currentOffset; if (!queryDelegate.implementationRespectsPagingLimits()) { rowCount = currentOffset = 0; setPageLengthInternal(size); } while (rs.next()) { List<ColumnProperty> itemProperties = new ArrayList<ColumnProperty>(); /* Generate row itemId based on primary key(s) */ Object[] itemId = new Object[pKeys.size()]; for (int i = 0; i < pKeys.size(); i++) { itemId[i] = rs.getObject(pKeys.get(i)); } RowId id = null; if (pKeys.isEmpty()) { id = new ReadOnlyRowId(rs.getRow()); } else { id = new RowId(itemId); } List<String> propertiesToAdd = new ArrayList<String>( propertyIds); if (!removedItems.containsKey(id)) { for (int i = 1; i <= rsmd.getColumnCount(); i++) { if (!isColumnIdentifierValid(rsmd.getColumnLabel(i))) { continue; } String colName = rsmd.getColumnLabel(i); Object value = rs.getObject(i); Class<?> type = value != null ? value.getClass() : Object.class; if (value == null) { for (String propName : propertyTypes.keySet()) { if (propName.equals(rsmd.getColumnLabel(i))) { type = propertyTypes.get(propName); break; } } } /* * In case there are more than one column with the same * name, add only the first one. This can easily happen * if you join many tables where each table has an ID * column. */ if (propertiesToAdd.contains(colName)) { cp = new ColumnProperty(colName, propertyReadOnly.get(colName), propertyPersistable.get(colName), propertyNullable.get(colName), propertyPrimaryKey.get(colName), value, type); itemProperties.add(cp); propertiesToAdd.remove(colName); } } /* Cache item */ itemIndexes.put(rowCount, id); // if an item with the id is contained in the modified // cache, then use this record and add it to the cached // items. Otherwise create a new item int modifiedIndex = indexInModifiedCache(id); if (modifiedIndex != -1) { cachedItems.put(id, modifiedItems.get(modifiedIndex)); } else { cachedItems.put(id, new RowItem(this, id, itemProperties)); } rowCount++; } } rs.getStatement().close(); rs.close(); queryDelegate.commit(); getLogger().log(Level.FINER, "Fetched {0} rows starting from {1}", new Object[] { fetchedRows, currentOffset }); } catch (SQLException e) { getLogger().log(Level.WARNING, "Failed to fetch rows, rolling back", e); try { queryDelegate.rollback(); } catch (SQLException e1) { getLogger().log(Level.SEVERE, "Failed to roll back", e1); } try { if (rs != null) { if (rs.getStatement() != null) { rs.getStatement().close(); rs.close(); } } } catch (SQLException e1) { getLogger().log(Level.WARNING, "Failed to close session", e1); } throw new RuntimeException("Failed to fetch page.", e); } }
3.68
hbase_Compactor_createScanner
/** * @param store The store. * @param scanners Store file scanners. * @param smallestReadPoint Smallest MVCC read point. * @param earliestPutTs Earliest put across all files. * @param dropDeletesFromRow Drop deletes starting with this row, inclusive. Can be null. * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null. * @return A compaction scanner. */ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException { return new StoreScanner(store, scanInfo, scanners, smallestReadPoint, earliestPutTs, dropDeletesFromRow, dropDeletesToRow); }
3.68
pulsar_ManagedLedgerConfig_setBookKeeperEnsemblePlacementPolicyProperties
/** * Managed-ledger can setup different custom EnsemblePlacementPolicy which needs * bookKeeperEnsemblePlacementPolicy-Properties. * * @param bookKeeperEnsemblePlacementPolicyProperties */ public void setBookKeeperEnsemblePlacementPolicyProperties( Map<String, Object> bookKeeperEnsemblePlacementPolicyProperties) { this.bookKeeperEnsemblePlacementPolicyProperties = bookKeeperEnsemblePlacementPolicyProperties; }
3.68
pulsar_ManagedLedgerConfig_isUnackedRangesOpenCacheSetEnabled
/** * Whether {@link ConcurrentOpenLongPairRangeSet} should be used to store unacked ranges. * @return true if the open-range cache set is enabled */ public boolean isUnackedRangesOpenCacheSetEnabled() { return unackedRangesOpenCacheSetEnabled; }
3.68
morf_ViewChanges_viewToName
/** * @return the name of a given view. */ private Function<View, String> viewToName() { return new Function<View, String>() { @Override public String apply(View view) { return view.getName(); } }; }
3.68
framework_VComboBox_isJustClosed
/** * Was the popup just closed? * * @return true if popup was just closed */ public boolean isJustClosed() { debug("VComboBox.SP: justClosed()"); final long now = new Date().getTime(); return lastAutoClosed > 0 && now - lastAutoClosed < 200; }
3.68
hadoop_DiskBalancerDataNode_getVolumeSets
/** * Returns the Volume sets on this node. * * @return a Map of VolumeSets */ public Map<String, DiskBalancerVolumeSet> getVolumeSets() { return volumeSets; }
3.68
morf_AliasedField_greaterThanOrEqualTo
/** * @param value object to compare to (right hand side) * @return a {@link Criterion} for a greater than or equal to expression of this field. */ public Criterion greaterThanOrEqualTo(Object value) { return Criterion.greaterThanOrEqualTo(this, value); }
3.68
hudi_AvroSchemaEvolutionUtils_reconcileSchema
/** * Support reconcile from a new avroSchema. * 1) incoming data has missing columns that were already defined in the table -> null values will be injected into missing columns * 2) incoming data contains new columns not defined yet in the table -> columns will be added to the table schema (from the incoming dataframe) * 3) incoming data has missing columns that are already defined in the table and new columns not yet defined in the table -> * new columns will be added to the table schema, missing columns will be injected with null values * 4) support type change * 5) support nested schema change. * Notice: * the incoming schema should not have delete/rename semantics. * for example: incoming schema: int a, int b, int d; oldTableSchema int a, int b, int c, int d * we must treat column c as missing, not deleted. * * @param incomingSchema incoming avro schema, implicitly evolved by the hoodie write operation * @param oldTableSchema old internalSchema * @return the reconciled schema */ public static InternalSchema reconcileSchema(Schema incomingSchema, InternalSchema oldTableSchema) { /* If incoming schema is null, we fall back on table schema. */ if (incomingSchema.getType() == Schema.Type.NULL) { return oldTableSchema; } InternalSchema inComingInternalSchema = convert(incomingSchema); // check column add/missing List<String> colNamesFromIncoming = inComingInternalSchema.getAllColsFullName(); List<String> colNamesFromOldSchema = oldTableSchema.getAllColsFullName(); List<String> diffFromOldSchema = colNamesFromOldSchema.stream().filter(f -> !colNamesFromIncoming.contains(f)).collect(Collectors.toList()); List<String> diffFromEvolutionColumns = colNamesFromIncoming.stream().filter(f -> !colNamesFromOldSchema.contains(f)).collect(Collectors.toList()); // check type change. List<String> typeChangeColumns = colNamesFromIncoming .stream() .filter(f -> colNamesFromOldSchema.contains(f) && !inComingInternalSchema.findType(f).equals(oldTableSchema.findType(f))) .collect(Collectors.toList()); if (colNamesFromIncoming.size() == colNamesFromOldSchema.size() && diffFromOldSchema.size() == 0 && typeChangeColumns.isEmpty()) { return oldTableSchema; } // Remove redundancy from diffFromEvolutionColumns. // for example, when we add a struct col in the evolved schema, say " user struct<name:string, age:int> ", // the diff operation yields user, user.name and user.age, which is redundant; user.name and user.age should be excluded. // deal with add operation TreeMap<Integer, String> finalAddAction = new TreeMap<>(); for (int i = 0; i < diffFromEvolutionColumns.size(); i++) { String name = diffFromEvolutionColumns.get(i); int splitPoint = name.lastIndexOf("."); String parentName = splitPoint > 0 ? name.substring(0, splitPoint) : ""; if (!parentName.isEmpty() && diffFromEvolutionColumns.contains(parentName)) { // find redundancy, skip it continue; } finalAddAction.put(inComingInternalSchema.findIdByName(name), name); } TableChanges.ColumnAddChange addChange = TableChanges.ColumnAddChange.get(oldTableSchema); finalAddAction.entrySet().stream().forEach(f -> { String name = f.getValue(); int splitPoint = name.lastIndexOf("."); String parentName = splitPoint > 0 ? name.substring(0, splitPoint) : ""; String rawName = splitPoint > 0 ? name.substring(splitPoint + 1) : name; // try to infer add position. java.util.Optional<String> inferPosition = colNamesFromIncoming.stream().filter(c -> c.lastIndexOf(".") == splitPoint && c.startsWith(parentName) && inComingInternalSchema.findIdByName(c) > inComingInternalSchema.findIdByName(name) && oldTableSchema.findIdByName(c) > 0).sorted((s1, s2) -> oldTableSchema.findIdByName(s1) - oldTableSchema.findIdByName(s2)).findFirst(); addChange.addColumns(parentName, rawName, inComingInternalSchema.findType(name), null); inferPosition.map(i -> addChange.addPositionChange(name, i, "before")); }); // do type evolution. InternalSchema internalSchemaAfterAddColumns = SchemaChangeUtils.applyTableChanges2Schema(oldTableSchema, addChange); TableChanges.ColumnUpdateChange typeChange = TableChanges.ColumnUpdateChange.get(internalSchemaAfterAddColumns); typeChangeColumns.stream().filter(f -> !inComingInternalSchema.findType(f).isNestedType()).forEach(col -> { typeChange.updateColumnType(col, inComingInternalSchema.findType(col)); }); return SchemaChangeUtils.applyTableChanges2Schema(internalSchemaAfterAddColumns, typeChange); }
3.68
framework_AbstractColorPicker_setDefaultCaptionEnabled
/** * Sets whether the component should show a default caption (the CSS code of * the currently selected color, e.g. #ffffff) when no other caption is * available. * * @param enabled true to show the default caption */ public void setDefaultCaptionEnabled(boolean enabled) { getState().showDefaultCaption = enabled; }
3.68
hadoop_BalanceProcedure_name
/** * Get the procedure name. */ public String name() { return name; }
3.68
flink_BloomFilter_fromBytes
/** Deserializing bytes array to BloomFilter. Currently, only heap memory is supported. */ public static BloomFilter fromBytes(byte[] bytes) { int numHashFunctions = UNSAFE.getInt(bytes, BYTE_ARRAY_BASE_OFFSET); int byteSize = UNSAFE.getInt(bytes, BYTE_ARRAY_BASE_OFFSET + 4); byte[] data = new byte[byteSize]; UNSAFE.copyMemory( bytes, BYTE_ARRAY_BASE_OFFSET + 8, data, BYTE_ARRAY_BASE_OFFSET, byteSize); BitSet bitSet = new BitSet(byteSize); bitSet.setMemorySegment(MemorySegmentFactory.wrap(data), 0); return new BloomFilter(bitSet, numHashFunctions); }
3.68
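fromBytes above implies the wire layout [numHashFunctions:int][byteSize:int][data:byte[byteSize]]. A hedged sketch of the inverse using ByteBuffer (an illustration, not Flink's actual serializer); native byte order is used because fromBytes reads through Unsafe, which is native-order:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch: produce bytes that the fromBytes layout above would accept.
static byte[] toBytes(int numHashFunctions, byte[] data) {
    ByteBuffer buf = ByteBuffer.allocate(8 + data.length).order(ByteOrder.nativeOrder());
    buf.putInt(numHashFunctions); // offset 0
    buf.putInt(data.length);      // offset 4
    buf.put(data);                // offset 8 onwards
    return buf.array();
}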
zxing_MatrixUtil_embedTypeInfo
// Embed type information. On success, modify the matrix. static void embedTypeInfo(ErrorCorrectionLevel ecLevel, int maskPattern, ByteMatrix matrix) throws WriterException { BitArray typeInfoBits = new BitArray(); makeTypeInfoBits(ecLevel, maskPattern, typeInfoBits); for (int i = 0; i < typeInfoBits.getSize(); ++i) { // Place bits in LSB to MSB order. LSB (least significant bit) is the last value in // "typeInfoBits". boolean bit = typeInfoBits.get(typeInfoBits.getSize() - 1 - i); // Type info bits at the left top corner. See 8.9 of JISX0510:2004 (p.46). int[] coordinates = TYPE_INFO_COORDINATES[i]; int x1 = coordinates[0]; int y1 = coordinates[1]; matrix.set(x1, y1, bit); int x2; int y2; if (i < 8) { // Right top corner. x2 = matrix.getWidth() - i - 1; y2 = 8; } else { // Left bottom corner. x2 = 8; y2 = matrix.getHeight() - 7 + (i - 8); } matrix.set(x2, y2, bit); } }
3.68
dubbo_AccessLogData_setArguments
/** * Sets invocation arguments * * @param arguments */ public void setArguments(Object[] arguments) { set(ARGUMENTS, arguments != null ? Arrays.copyOf(arguments, arguments.length) : null); }
3.68
flink_AllWindowedStream_max
/** * Applies an aggregation that gives the maximum value of the pojo data stream at the given * field expression for every window. A field expression is either the name of a public field or * a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot * can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. * * @param field The field expression based on which the aggregation will be applied. * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> max(String field) { return aggregate( new ComparableAggregator<>( field, input.getType(), AggregationFunction.AggregationType.MAX, false, input.getExecutionConfig())); }
3.68
hadoop_AuditingIntegration_updateCommonContextOnCommitterEntry
/** * Add jobID to current context; also * task attempt ID if set. */ public static void updateCommonContextOnCommitterEntry( ManifestCommitterConfig committerConfig) { CommonAuditContext context = currentAuditContext(); context.put(PARAM_JOB_ID, committerConfig.getJobUniqueId()); // maybe the task attempt ID. if (!committerConfig.getTaskAttemptId().isEmpty()) { context.put(CONTEXT_ATTR_TASK_ATTEMPT_ID, committerConfig.getTaskAttemptId()); } }
3.68
querydsl_AbstractOracleQuery_connectByNocyclePrior
/** * CONNECT BY specifies the relationship between parent rows and child rows of the hierarchy. * * @param cond condition * @return the current object */ public C connectByNocyclePrior(Predicate cond) { return addFlag(Position.BEFORE_ORDER, CONNECT_BY_NOCYCLE_PRIOR, cond); }
3.68
flink_MemorySegment_getIntLittleEndian
/** * Reads an int value (32bit, 4 bytes) from the given position, in little-endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #getInt(int)}. For most cases (such as transient storage in memory or serialization * for I/O and network), it suffices to know that the byte order in which the value is written * is the same as the one in which it is read, and {@link #getInt(int)} is the preferable * choice. * * @param index The position from which the value will be read. * @return The int value at the given position. * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the * segment size minus 4. */ public int getIntLittleEndian(int index) { if (LITTLE_ENDIAN) { return getInt(index); } else { return Integer.reverseBytes(getInt(index)); } }
3.68
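The byte-reversal fallback is easy to verify in isolation: reversing the four bytes of an int turns its big-endian reading into the little-endian one, which is exactly what getIntLittleEndian does on big-endian systems.

public class EndianDemo {
    public static void main(String[] args) {
        int value = 0x01020304;
        // Prints 04030201: the same four bytes read in the opposite order.
        System.out.printf("%08x%n", Integer.reverseBytes(value));
    }
}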
flink_NFAStateNameHandler_checkNameUniqueness
/** * Checks if the given name is already used or not. If yes, it throws a {@link * MalformedPatternException}. * * @param name The name to be checked. */ public void checkNameUniqueness(String name) { if (usedNames.contains(name)) { throw new MalformedPatternException( "Duplicate pattern name: " + name + ". Names must be unique."); } usedNames.add(name); }
3.68
hadoop_SuccessData_dumpMetrics
/** * Dump the metrics (if any) to a string. * The metrics are sorted for ease of viewing. * @param prefix prefix before every entry * @param middle string between key and value * @param suffix suffix to each entry * @return the dumped string */ public String dumpMetrics(String prefix, String middle, String suffix) { return joinMap(metrics, prefix, middle, suffix); }
3.68
flink_BlobKey_writeToOutputStream
/** * Auxiliary method to write this BLOB key to an output stream. * * @param outputStream the output stream to write the BLOB key to * @throws IOException thrown if an I/O error occurs while writing the BLOB key */ void writeToOutputStream(final OutputStream outputStream) throws IOException { outputStream.write(this.key); outputStream.write(this.type.ordinal()); outputStream.write(this.random.getBytes()); }
3.68
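writeToOutputStream emits the key bytes, one type-ordinal byte, then the random bytes, with no length prefixes. A hedged sketch of the matching read side; the fixed lengths of key and random are assumptions the reader must already know:

import java.io.IOException;
import java.io.InputStream;

// Sketch, not Flink's readFromInputStream: consumes the fields in the
// order writeToOutputStream produced them.
static void readBlobKeyFields(InputStream in, byte[] key, byte[] random) throws IOException {
    if (in.readNBytes(key, 0, key.length) != key.length) {
        throw new IOException("Truncated BLOB key");
    }
    int typeOrdinal = in.read(); // the single BlobType ordinal byte
    if (typeOrdinal < 0) {
        throw new IOException("Truncated BLOB key");
    }
    if (in.readNBytes(random, 0, random.length) != random.length) {
        throw new IOException("Truncated BLOB key");
    }
}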
flink_ExtractionUtils_primitiveToWrapper
/** * Converts the specified primitive Class object to its corresponding wrapper Class object. * * <p>NOTE: From v2.2, this method handles {@code Void.TYPE}, returning {@code Void.TYPE}. * * @param cls the class to convert, may be null * @return the wrapper class for {@code cls} or {@code cls} if {@code cls} is not a primitive. * {@code null} if null input. * @since 2.1 */ public static Class<?> primitiveToWrapper(final Class<?> cls) { Class<?> convertedClass = cls; if (cls != null && cls.isPrimitive()) { convertedClass = primitiveWrapperMap.get(cls); } return convertedClass; }
3.68
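Quick sanity checks of the contract described above, written as assertions:

assert primitiveToWrapper(int.class) == Integer.class;
assert primitiveToWrapper(void.class) == void.class;      // per the NOTE: Void.TYPE maps to itself
assert primitiveToWrapper(String.class) == String.class;  // non-primitives pass through unchanged
assert primitiveToWrapper(null) == null;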
pulsar_ConnectionPool_connectToAddress
/** * Attempt to establish a TCP connection to an already resolved single IP address. */ private CompletableFuture<Channel> connectToAddress(InetSocketAddress logicalAddress, InetSocketAddress physicalAddress, InetSocketAddress unresolvedPhysicalAddress, InetSocketAddress sniHost) { if (clientConfig.isUseTls()) { return toCompletableFuture(bootstrap.register()) .thenCompose(channel -> channelInitializerHandler .initTls(channel, sniHost != null ? sniHost : physicalAddress)) .thenCompose(channelInitializerHandler::initSocks5IfConfig) .thenCompose(ch -> channelInitializerHandler.initializeClientCnx(ch, logicalAddress, unresolvedPhysicalAddress)) .thenCompose(channel -> toCompletableFuture(channel.connect(physicalAddress))); } else { return toCompletableFuture(bootstrap.register()) .thenCompose(channelInitializerHandler::initSocks5IfConfig) .thenCompose(ch -> channelInitializerHandler.initializeClientCnx(ch, logicalAddress, unresolvedPhysicalAddress)) .thenCompose(channel -> toCompletableFuture(channel.connect(physicalAddress))); } }
3.68
morf_Join_getTable
/** * Get the table to join to. * * @return the table */ public TableReference getTable() { return table; }
3.68
pulsar_AuthenticationProviderOpenID_authenticateToken
/** * Authenticate the parameterized JWT. * * @param token - a nonnull JWT to authenticate * @return a future that completes with the fully authenticated JWT, or completes exceptionally with an AuthenticationException if the JWT is proven to be invalid in any way */ private CompletableFuture<DecodedJWT> authenticateToken(String token) { if (token == null) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT); return CompletableFuture.failedFuture(new AuthenticationException("JWT cannot be null")); } final DecodedJWT jwt; try { jwt = decodeJWT(token); } catch (AuthenticationException e) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT); return CompletableFuture.failedFuture(e); } return verifyIssuerAndGetJwk(jwt) .thenCompose(jwk -> { try { if (!jwt.getAlgorithm().equals(jwk.getAlgorithm())) { incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH); return CompletableFuture.failedFuture( new AuthenticationException("JWK's alg [" + jwk.getAlgorithm() + "] does not match JWT's alg [" + jwt.getAlgorithm() + "]")); } // Verify the JWT signature // Throws exception if any verification check fails return CompletableFuture .completedFuture(verifyJWT(jwk.getPublicKey(), jwt.getAlgorithm(), jwt)); } catch (InvalidPublicKeyException e) { incrementFailureMetric(AuthenticationExceptionCode.INVALID_PUBLIC_KEY); return CompletableFuture.failedFuture( new AuthenticationException("Invalid public key: " + e.getMessage())); } catch (AuthenticationException e) { return CompletableFuture.failedFuture(e); } }); }
3.68
flink_RocksDBMemoryConfiguration_setWriteBufferRatio
/** * Sets the fraction of the total memory to be used for write buffers. This only has an effect * if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)} * is set. * * <p>See {@link RocksDBOptions#WRITE_BUFFER_RATIO} for details. */ public void setWriteBufferRatio(double writeBufferRatio) { Preconditions.checkArgument( writeBufferRatio > 0 && writeBufferRatio < 1.0, "Write Buffer ratio %s must be in (0, 1)", writeBufferRatio); this.writeBufferRatio = writeBufferRatio; }
3.68
framework_ServiceInitEvent_addDependencyFilter
/** * Adds a new dependency filter that will be used by this service. * * @param dependencyFilter * the dependency filter to add, not <code>null</code> * * @since 8.1 */ public void addDependencyFilter(DependencyFilter dependencyFilter) { Objects.requireNonNull(dependencyFilter, "Dependency filter cannot be null"); addedDependencyFilters.add(dependencyFilter); }
3.68
framework_BasicEvent_fireEventChange
/** * Fires an event change event to the listeners. Should be triggered when * some property of the event changes. */ protected void fireEventChange() { EventChangeEvent event = new EventChangeEvent(this); for (EventChangeListener listener : listeners) { listener.eventChange(event); } }
3.68
framework_DateCell_setHorizontalSized
/** * @param isHorizontalSized * if true, this DateCell is sized with CSS and not via * {@link #setWidthPX(int)} */ public void setHorizontalSized(boolean isHorizontalSized) { if (isHorizontalSized) { addStyleDependentName("Hsized"); width = getOffsetWidth() - WidgetUtil.measureHorizontalBorder(getElement()); // Update moveWidth for any DateCellDayEvent child updateEventCellsWidth(); recalculateEventWidths(); } else { removeStyleDependentName("Hsized"); } }
3.68
flink_AsynchronousBlockWriter_getNextReturnedBlock
/** * Gets the next memory segment that has been written and is available again. This method blocks * until such a segment is available, or until an error occurs in the writer, or the writer is * closed. * * <p>NOTE: If this method is invoked without any segment ever returning (for example, because * the {@link #writeBlock(MemorySegment)} method has not been invoked accordingly), the method * may block forever. * * @return The next memory segment from the writers's return queue. * @throws IOException Thrown, if an I/O error occurs in the writer while waiting for the * request to return. */ @Override public MemorySegment getNextReturnedBlock() throws IOException { try { while (true) { final MemorySegment next = returnSegments.poll(1000, TimeUnit.MILLISECONDS); if (next != null) { return next; } else { if (this.closed) { throw new IOException("The writer has been closed."); } checkErroneous(); } } } catch (InterruptedException e) { throw new IOException( "Writer was interrupted while waiting for the next returning segment."); } }
3.68
flink_SerializedCompositeKeyBuilder_buildCompositeKeyNamespace
/** * Returns a serialized composite key, from the key and key-group provided in a previous call to * {@link #setKeyAndKeyGroup(Object, int)} and the given namespace. * * @param namespace the namespace to concatenate for the serialized composite key bytes. * @param namespaceSerializer the serializer to obtain the serialized form of the namespace. * @param <N> the type of the namespace. * @return the bytes for the serialized composite key of key-group, key, namespace. */ @Nonnull public <N> byte[] buildCompositeKeyNamespace( @Nonnull N namespace, @Nonnull TypeSerializer<N> namespaceSerializer) { try { serializeNamespace(namespace, namespaceSerializer); return keyOutView.getCopyOfBuffer(); } catch (IOException shouldNeverHappen) { throw new FlinkRuntimeException(shouldNeverHappen); } }
3.68
hadoop_FederationStateStoreFacade_setDelegationTokenSeqNum
/** * Set the delegation token sequence number in the stateStore. * * @param seqNum delegationTokenSequenceNumber. */ public void setDelegationTokenSeqNum(int seqNum) { stateStore.setDelegationTokenSeqNum(seqNum); }
3.68
hadoop_RenameFilesStage_getTotalFileSize
/** * Get the total file size of the committed task. * @return a number greater than or equal to zero. */ public synchronized long getTotalFileSize() { return totalFileSize; }
3.68
framework_MenuBar_getFirstItem
/** * Gets the first item from the menu or null if no items. * * @since 7.2.6 * @return the first item from the menu or null if no items. */ public MenuItem getFirstItem() { return items != null && !items.isEmpty() ? items.get(0) : null; }
3.68
druid_MySQL8DateTimeSqlTypeFilter_resultSet_getObject
/** * For MySQL JDBC 8.0.23 and above, this method converts the returned object * back to its original (pre-8.0.23) type. * * @param chain the filter chain * @param result the result set proxy * @param columnLabel the column label * @return the column value, with LocalDateTime mapped back to the legacy type * @throws SQLException * @see java.sql.ResultSet#getObject(String) */ @Override public Object resultSet_getObject(FilterChain chain, ResultSetProxy result, String columnLabel) throws SQLException { return getObjectReplaceLocalDateTime(super.resultSet_getObject(chain, result, columnLabel)); }
3.68
framework_SessionDestroyEvent_getSession
/** * Gets the Vaadin service session that is no longer used. * * @return the Vaadin service session */ public VaadinSession getSession() { return session; }
3.68
hadoop_DiskBalancerDataNode_setDataNodePort
/** * Sets the DataNode Port number. * * @param port - Datanode Port Number */ public void setDataNodePort(int port) { this.dataNodePort = port; }
3.68
flink_TypeExtractor_getAllDeclaredFields
/** * Recursively determines all declared fields. This is required because Class.getFields() does not * return fields defined in parent classes. * * @param clazz class to be analyzed * @param ignoreDuplicates if true, in case of duplicate field names only the lowest one in a * hierarchy will be returned; throws an exception otherwise * @return list of fields */ @PublicEvolving public static List<Field> getAllDeclaredFields(Class<?> clazz, boolean ignoreDuplicates) { List<Field> result = new ArrayList<>(); while (clazz != null) { Field[] fields = clazz.getDeclaredFields(); for (Field field : fields) { if (Modifier.isTransient(field.getModifiers()) || Modifier.isStatic(field.getModifiers())) { continue; // we have no use for transient or static fields } if (hasFieldWithSameName(field.getName(), result)) { if (ignoreDuplicates) { continue; } else { throw new InvalidTypesException( "The field " + field + " is already contained in the hierarchy of the " + clazz + ". " + "Please use unique field names throughout your class hierarchy"); } } result.add(field); } clazz = clazz.getSuperclass(); } return result; }
3.68
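A small illustration of why the parent-class walk matters; the classes here are hypothetical:

// Child.class.getDeclaredFields() alone would return only [name, scratch];
// the walk above also surfaces Base.id, while the modifier checks skip the
// static and transient fields.
class Base { private int id; static int counter; }
class Child extends Base { private String name; transient int scratch; }
// getAllDeclaredFields(Child.class, false) -> [name, id]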
flink_Costs_setCpuCost
/** * Sets the cost for the CPU. * * @param cost The CPU Cost. */ public void setCpuCost(double cost) { if (cost == UNKNOWN || cost >= 0) { this.cpuCost = cost; } else { throw new IllegalArgumentException(); } }
3.68
flink_ResourceManagerFactory_getEffectiveConfigurationForResourceManagerAndRuntimeServices
/** * Configuration changes in this method will be visible to both {@link ResourceManager} and * {@link ResourceManagerRuntimeServices}. This can be overridden by {@link * #getEffectiveConfigurationForResourceManager}. */ protected Configuration getEffectiveConfigurationForResourceManagerAndRuntimeServices( final Configuration configuration) { return configuration; }
3.68
framework_DataCommunicator_getDataProvider
/** * Gets the current data provider from this DataCommunicator. * * @return the data provider */ public DataProvider<T, ?> getDataProvider() { return dataProvider; }
3.68
framework_DefaultSQLGenerator_generateOrderBy
/** * Generates sorting rules as an ORDER BY clause. * * @param sb * StringBuffer to which the clause is appended. * @param o * OrderBy object to be added into the sb. * @param firstOrderBy * If true, this is the first OrderBy. * @return the same StringBuffer, with the clause appended. */ protected StringBuffer generateOrderBy(StringBuffer sb, OrderBy o, boolean firstOrderBy) { if (firstOrderBy) { sb.append(" ORDER BY "); } else { sb.append(", "); } sb.append(QueryBuilder.quote(o.getColumn())); if (o.isAscending()) { sb.append(" ASC"); } else { sb.append(" DESC"); } return sb; }
3.68
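Chaining two calls shows the firstOrderBy flag at work. The OrderBy constructor signature (column, isAscending) and double-quote output of QueryBuilder.quote are assumptions here:

StringBuffer sb = new StringBuffer("SELECT * FROM users");
generateOrderBy(sb, new OrderBy("lastName", true), true);  // appends: ORDER BY "lastName" ASC
generateOrderBy(sb, new OrderBy("age", false), false);     // appends: , "age" DESC
// sb now reads: SELECT * FROM users ORDER BY "lastName" ASC, "age" DESC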
hbase_RowModel_getKey
/** Returns the row key */ public byte[] getKey() { return key; }
3.68
flink_FlinkContainers_beforeAll
// ------------------------ JUnit 5 lifecycle management ------------------------ @Override public void beforeAll(ExtensionContext context) throws Exception { this.start(); }
3.68
flink_MetricStore_getJobManagerMetricStore
/** * Returns the {@link ComponentMetricStore} for the JobManager. * * @return ComponentMetricStore for the JobManager */ public synchronized ComponentMetricStore getJobManagerMetricStore() { return ComponentMetricStore.unmodifiable(jobManager); }
3.68
druid_DataSourceSelectorEnum_newInstance
/** * Create a new instance of the DataSourceSelector represented by this enum. * * @return null if dataSource is not given or exception occurred while creating new instance */ public DataSourceSelector newInstance(HighAvailableDataSource dataSource) { if (dataSource == null) { LOG.warn("You should provide an instance of HighAvailableDataSource!"); return null; } DataSourceSelector selector = null; try { selector = clazz.getDeclaredConstructor(HighAvailableDataSource.class).newInstance(dataSource); } catch (Exception e) { LOG.error("Can not create new instance of " + clazz.getName(), e); } return selector; }
3.68
framework_VGridLayout_getCell
/** For internal use only. May be removed or replaced in the future. */ public Cell getCell(int row, int col) { return cells[col][row]; }
3.68
hbase_HFileBlockIndex_rootBlockContainingKey
/** * Finds the root-level index block containing the given key. * @param key Key to find * @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1) * or -1 if this file does not contain the request. */ // When we want to find the meta index block or bloom block for ROW bloom // type // Bytes.BYTES_RAWCOMPARATOR would be enough. For the ROW_COL bloom case we // need the CellComparator. public int rootBlockContainingKey(final byte[] key, int offset, int length) { return rootBlockContainingKey(key, offset, length, null); }
3.68
hudi_HoodieTable_rollbackInflightCompaction
/** * Rollback failed compactions. Inflight rollbacks for compactions revert the .inflight file * to the .requested file. * * @param inflightInstant Inflight Compaction Instant */ public void rollbackInflightCompaction(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) { ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION)); rollbackInflightInstant(inflightInstant, getPendingRollbackInstantFunc); }
3.68
hbase_Scan_setLimit
/** * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows * reaches this value. * <p> * This condition will be tested at last, after all other conditions such as stopRow, filter, etc. * @param limit the limit of rows for this scan */ public Scan setLimit(int limit) { this.limit = limit; return this; }
3.68
hbase_ChoreService_rescheduleChore
/** * @param chore The Chore to be rescheduled. If the chore is not scheduled with this ChoreService * yet then this call is equivalent to a call to scheduleChore. */ private void rescheduleChore(ScheduledChore chore, boolean immediately) { if (scheduledChores.containsKey(chore)) { ScheduledFuture<?> future = scheduledChores.get(chore); future.cancel(false); } // set the initial delay to 0 when the chore should run immediately ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(chore, immediately ? 0 : chore.getPeriod(), chore.getPeriod(), chore.getTimeUnit()); scheduledChores.put(chore, future); }
3.68
hibernate-validator_ExecutableHelper_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
pulsar_AbstractDispatcherMultipleConsumers_getRandomConsumer
/** * Get a random consumer from consumerList. * * @return null if no consumer is available, else a random consumer from consumerList */ public Consumer getRandomConsumer() { if (consumerList.isEmpty() || IS_CLOSED_UPDATER.get(this) == TRUE) { // abort read if no consumers are connected or if disconnect is initiated return null; } return consumerList.get(ThreadLocalRandom.current().nextInt(consumerList.size())); }
3.68
flink_ResultSubpartitionView_notifyRequiredSegmentId
/** * In tiered storage shuffle mode, only required segments will be sent, to prevent redundant * buffer usage. The downstream notifies the upstream through this method which segments it requires. * * @param segmentId the id of the required segment */ default void notifyRequiredSegmentId(int segmentId) {}
3.68
flink_CatalogDatabaseImpl_getDetailedDescription
/** * Get a detailed description of the database. * * @return an optional long description of the database */ public Optional<String> getDetailedDescription() { return Optional.ofNullable(comment); }
3.68
hudi_BloomFilterFactory_fromString
/** * Generate {@link BloomFilter} from serialized String. * * @param serString the serialized string of the {@link BloomFilter} * @param bloomFilterTypeCode bloom filter type code as string * @return the {@link BloomFilter} thus generated from the passed in serialized string */ public static BloomFilter fromString(String serString, String bloomFilterTypeCode) { if (bloomFilterTypeCode.equalsIgnoreCase(BloomFilterTypeCode.SIMPLE.name())) { return new SimpleBloomFilter(serString); } else if (bloomFilterTypeCode.equalsIgnoreCase(BloomFilterTypeCode.DYNAMIC_V0.name())) { return new HoodieDynamicBoundedBloomFilter(serString); } else { throw new IllegalArgumentException("Bloom Filter type code not recognizable " + bloomFilterTypeCode); } }
3.68
hmily_DatabaseMetaDataDialectHandlerFactory_findHandler
/** * Find database meta data dialect handler. * * @param databaseType database type * @return database meta data dialect handler */ public static Optional<DatabaseMetaDataDialectHandler> findHandler(final DatabaseType databaseType) { return Optional.ofNullable(ExtensionLoaderFactory.load(DatabaseMetaDataDialectHandler.class, databaseType.getName())); }
3.68
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_isBucketClusteringMergeEnabled
/** * Whether bucket merging is enabled when using the consistent hashing bucket index. * * @return true if bucket merge is enabled, false otherwise. */ protected boolean isBucketClusteringMergeEnabled() { return true; }
3.68