Dataset columns:
name (string, lengths 12 to 178): method identifier in the form <project>_<Class>_<method>
code_snippet (string, lengths 8 to 36.5k): the method source, including its Javadoc
score (float64, range 3.26 to 3.68): score assigned to the snippet
flink_ParquetVectorizedInputFormat_createReadableVectors
/** * Create readable vectors from writable vectors. Especially for decimal, see {@link * ParquetDecimalVector}. */ private ColumnVector[] createReadableVectors(WritableColumnVector[] writableVectors) { ColumnVector[] vectors = new ColumnVector[writableVectors.length]; for (int i = 0; i < writableVectors.length; i++) { vectors[i] = projectedTypes[i].getTypeRoot() == LogicalTypeRoot.DECIMAL ? new ParquetDecimalVector(writableVectors[i]) : writableVectors[i]; } return vectors; }
3.68
hbase_ServerName_getStartcode
/** * Return the start code. * @deprecated Since 2.5.0, will be removed in 4.0.0. Use {@link #getStartCode()} instead. */ @Deprecated public long getStartcode() { return startCode; }
3.68
hbase_MasterRpcServices_offlineRegion
/** * Offline specified region from master's in-memory state. It will not attempt to reassign the * region as in unassign. This is a special method that should be used by experts or hbck. */ @Override public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) throws ServiceException { try { server.checkInitialized(); final RegionSpecifierType type = request.getRegion().getType(); if (type != RegionSpecifierType.REGION_NAME) { LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME + " actual: " + type); } final byte[] regionName = request.getRegion().getValue().toByteArray(); final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName); if (hri == null) { throw new UnknownRegionException(Bytes.toStringBinary(regionName)); } if (server.cpHost != null) { server.cpHost.preRegionOffline(hri); } LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); server.getAssignmentManager().offlineRegion(hri); if (server.cpHost != null) { server.cpHost.postRegionOffline(hri); } } catch (IOException ioe) { throw new ServiceException(ioe); } return OfflineRegionResponse.newBuilder().build(); }
3.68
flink_QueryableStateStream_getStateDescriptor
/** * Returns the state descriptor for the queryable state instance. * * @return State descriptor for the state instance */ public StateDescriptor<?, V> getStateDescriptor() { return stateDescriptor; }
3.68
hudi_Key_compareTo
// Comparable @Override public int compareTo(Key other) { int result = this.bytes.length - other.getBytes().length; for (int i = 0; result == 0 && i < bytes.length; i++) { result = this.bytes[i] - other.bytes[i]; } if (result == 0) { result = (int) (this.weight - other.weight); } return result; }
3.68
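The comparator above orders keys by byte-array length first, then element-wise by signed byte value, and finally by the weight difference truncated to an int. A standalone sketch (hypothetical demo class, not part of Hudi) reproducing that ordering; note the truncation means weight gaps smaller than 1.0 compare as equal, a caveat of this scheme:

public class KeyOrderingDemo {
    static int compare(byte[] a, double wa, byte[] b, double wb) {
        int result = a.length - b.length;            // shorter arrays sort first
        for (int i = 0; result == 0 && i < a.length; i++) {
            result = a[i] - b[i];                    // signed byte comparison, as in the original
        }
        if (result == 0) {
            result = (int) (wa - wb);                // weight difference truncated to int
        }
        return result;
    }

    public static void main(String[] args) {
        byte[] x = {1, 2};
        byte[] y = {1, 3};
        System.out.println(compare(x, 1.0, y, 1.0)); // -1: differs at the second byte
        System.out.println(compare(x, 2.5, x, 2.0)); // 0: weight gap under 1.0 truncates away
    }
}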
hudi_ArchiveTask_newBuilder
/** * Utility to create builder for {@link ArchiveTask}. * * @return Builder for {@link ArchiveTask}. */ public static Builder newBuilder() { return new Builder(); }
3.68
hudi_HoodieFlinkWriteClient_insertOverwriteTable
/** * Removes all existing records of the Hoodie table and inserts the given HoodieRecords into the table. * * @param records HoodieRecords to insert * @param instantTime Instant time of the commit * @return list of WriteStatus to inspect errors and counts */ public List<WriteStatus> insertOverwriteTable( List<HoodieRecord<T>> records, final String instantTime) { HoodieTable table = initTable(WriteOperationType.INSERT_OVERWRITE_TABLE, Option.ofNullable(instantTime)); table.validateInsertSchema(); preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE_TABLE, table.getMetaClient()); // create the write handle if it does not exist HoodieWriteMetadata<List<WriteStatus>> result; try (AutoCloseableWriteHandle closeableHandle = new AutoCloseableWriteHandle(records, instantTime, table, true)) { result = ((HoodieFlinkTable<T>) table).insertOverwriteTable(context, closeableHandle.getWriteHandle(), instantTime, records); } return postWrite(result, instantTime, table); }
3.68
hadoop_BlockBlobAppendStream_hsync
/** * Force all data in the output stream to be written to Azure storage. * Wait to return until this is complete. */ @Override public void hsync() throws IOException { // when block compaction is disabled, hsync is a no-op if (compactionEnabled) { flush(); } }
3.68
hbase_HFileBlockIndex_getRootBlockDataSize
/** * @param i zero-based index of a root-level block * @return the on-disk size of the root-level block for version 2, or the uncompressed size for * version 1 */ public int getRootBlockDataSize(int i) { return blockDataSizes[i]; }
3.68
hadoop_MultiSchemeDelegationTokenAuthenticationHandler_authenticate
/** * This method is overridden to restrict HTTP authentication schemes * available for delegation token management functionality. The * authentication schemes to be used for delegation token management are * configured using {@link DELEGATION_TOKEN_SCHEMES_PROPERTY} * * The basic logic here is to check if the current request is for delegation * token management. If yes then check if the request contains an * "Authorization" header. If it is missing, then return the HTTP 401 * response with WWW-Authenticate header for each scheme configured for * delegation token management. * * It is also possible for a client to preemptively send Authorization header * for a scheme not configured for delegation token management. We detect * this case and return the HTTP 401 response with WWW-Authenticate header * for each scheme configured for delegation token management. * * If a client has sent a request with "Authorization" header for a scheme * configured for delegation token management, then it is forwarded to * underlying {@link MultiSchemeAuthenticationHandler} for actual * authentication. * * Finally all other requests (excluding delegation token management) are * forwarded to underlying {@link MultiSchemeAuthenticationHandler} for * actual authentication. */ @Override public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response) throws IOException, AuthenticationException { String authorization = request.getHeader(HttpConstants.AUTHORIZATION_HEADER); if (isManagementOperation(request)) { boolean schemeConfigured = false; if (authorization != null) { for (String scheme : delegationAuthSchemes) { if (AuthenticationHandlerUtil. matchAuthScheme(scheme, authorization)) { schemeConfigured = true; break; } } } if (!schemeConfigured) { response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); for (String scheme : delegationAuthSchemes) { response.addHeader(WWW_AUTHENTICATE, scheme); } return null; } } return super.authenticate(request, response); }
3.68
flink_FlinkRelMdCollation_sort
/** Helper method to determine a {@link org.apache.calcite.rel.core.Sort}'s collation. */ public static List<RelCollation> sort(RelCollation collation) { return com.google.common.collect.ImmutableList.of(collation); }
3.68
querydsl_ExpressionUtils_or
/** * Create a {@code left or right} expression * * @param left lhs of expression * @param right rhs of expression * @return left or right */ public static Predicate or(Predicate left, Predicate right) { left = (Predicate) extract(left); right = (Predicate) extract(right); if (left == null) { return right; } else if (right == null) { return left; } else { return predicate(Ops.OR, left, right); } }
3.68
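ExpressionUtils.or treats a null operand as the identity, so callers can fold together predicates that may be absent. A generic sketch of the same null-tolerant combine pattern using only java.util.function (the helper and class names are hypothetical):

import java.util.function.BinaryOperator;

public class NullTolerantCombineDemo {
    // Null operands act as the identity, mirroring the or() method's null handling.
    static <T> T combine(T left, T right, BinaryOperator<T> op) {
        if (left == null) {
            return right;
        } else if (right == null) {
            return left;
        }
        return op.apply(left, right);
    }

    public static void main(String[] args) {
        BinaryOperator<String> or = (l, r) -> "(" + l + " or " + r + ")";
        System.out.println(combine("a = 1", "b = 2", or)); // (a = 1 or b = 2)
        System.out.println(combine(null, "b = 2", or));    // b = 2
    }
}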
hadoop_BlockBlobAppendStream_addFlushCommand
/** * Prepare block list commit command and queue the command in thread pool * executor. */ private synchronized UploadCommand addFlushCommand() throws IOException { maybeThrowFirstError(); if (blobExist && lease.isFreed()) { throw new AzureException( String.format("Attempting to upload block list on blob : %s" + " that does not have lease on the Blob. Failing upload", key)); } UploadCommand command = new UploadBlockListCommand(); activeBlockCommands.add(command); ioThreadPool.execute(new WriteRequest(command)); return command; }
3.68
framework_VComboBox_updateMenuWidth
/** * Adds in-line CSS rules to the DOM according to the * suggestionPopupWidth field * * @param desiredWidth * @param naturalMenuWidth */ private void updateMenuWidth(final int desiredWidth, int naturalMenuWidth) { /** * Three different width modes for the suggestion pop-up: * * 1. Legacy "null"-mode: width is determined by the longest item * caption for each page while still maintaining minimum width of * (desiredWidth - popupOuterPadding) * * 2. relative to the component itself * * 3. fixed width */ String width = "auto"; if (suggestionPopupWidth == null) { if (naturalMenuWidth < desiredWidth) { naturalMenuWidth = desiredWidth - popupOuterPadding; width = desiredWidth - popupOuterPadding + "px"; } } else if (isrelativeUnits(suggestionPopupWidth)) { float mainComponentWidth = desiredWidth - popupOuterPadding; // convert percentage value to fraction int widthInPx = Math.round( mainComponentWidth * asFraction(suggestionPopupWidth)); width = widthInPx + "px"; } else { // use as fixed width CSS definition width = WidgetUtil.escapeAttribute(suggestionPopupWidth); } menu.setWidth(width); }
3.68
flink_LocalInputChannel_retriggerSubpartitionRequest
/** Retriggers a subpartition request. */ void retriggerSubpartitionRequest(Timer timer) { synchronized (requestLock) { checkState(subpartitionView == null, "already requested partition"); timer.schedule( new TimerTask() { @Override public void run() { try { requestSubpartition(); } catch (Throwable t) { setError(t); } } }, getCurrentBackoff()); } }
3.68
hbase_ServerManager_isServerUnknown
/** * Check if a server is unknown. A server can be online, or known to be dead, or unknown to this * manager (i.e., not online, not known to be dead either; it is simply not tracked by the master * any more, for example, a very old previous instance). */ public boolean isServerUnknown(ServerName serverName) { return serverName == null || (!onlineServers.containsKey(serverName) && !deadservers.isDeadServer(serverName)); }
3.68
morf_UpgradeTableResolution_getReadTables
/** * @param upgradeStepName name of the class of the upgrade step to be checked * @return all tables read by given upgrade step */ public Set<String> getReadTables(String upgradeStepName) { return resolvedTablesMap.get(upgradeStepName) == null ? null : resolvedTablesMap.get(upgradeStepName).getReadTables(); }
3.68
pulsar_AbstractMetrics_createMetrics
/** * Creates a metrics instance with an empty immutable dimension map. * <p> * Use this for metrics that don't need any dimensions, i.e. global metrics. * * @return the metrics instance */ protected Metrics createMetrics() { return createMetrics(new HashMap<String, String>()); }
3.68
hbase_MergeTableRegionsProcedure_cleanupMergedRegion
/** * Clean up a merged region on rollback after failure. */ private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); TableName tn = this.regionsToMerge[0].getTable(); final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), tn); final FileSystem fs = mfs.getFileSystem(); // See createMergedRegion above where we specify the merge dir as being in the // FIRST merge parent region. HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false); regionFs.cleanupMergedRegion(mergedRegion); }
3.68
hudi_ConsistentBucketIdentifier_getFormerBucket
/** * Get the former node of the given node (inferred from hash value). */ public ConsistentHashingNode getFormerBucket(int hashValue) { SortedMap<Integer, ConsistentHashingNode> headMap = ring.headMap(hashValue); return headMap.isEmpty() ? ring.lastEntry().getValue() : headMap.get(headMap.lastKey()); }
3.68
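getFormerBucket is a standard consistent-hashing lookup: take the greatest node strictly below the hash value and wrap around to the ring's last node when none exists. A self-contained TreeMap sketch of the same walk (node names and positions are illustrative):

import java.util.SortedMap;
import java.util.TreeMap;

public class HashRingDemo {
    public static void main(String[] args) {
        TreeMap<Integer, String> ring = new TreeMap<>();
        ring.put(100, "node-A");
        ring.put(500, "node-B");
        ring.put(900, "node-C");

        // Former bucket of hash 300: greatest node strictly below it.
        SortedMap<Integer, String> headMap = ring.headMap(300);
        System.out.println(headMap.isEmpty()
                ? ring.lastEntry().getValue()
                : headMap.get(headMap.lastKey())); // node-A

        // Below the first node the lookup wraps to the end of the ring.
        headMap = ring.headMap(50);
        System.out.println(headMap.isEmpty()
                ? ring.lastEntry().getValue()
                : headMap.get(headMap.lastKey())); // node-C
    }
}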
morf_SqlUtils_math
/** * Returns a mathematical expression. * * @param leftField The left field * @param operator The operator * @param rightField The right field * @return the expression. */ public static MathsField math(AliasedField leftField, MathsOperator operator, AliasedField rightField) { return new MathsField(leftField, operator, rightField); }
3.68
hadoop_SingleFilePerBlockCache_takeLock
/** * Try to take the read or write lock within the given timeout. * * @param lockType type of the lock. * @param timeout the time to wait for the given lock. * @param unit the time unit of the timeout argument. * @return true if the lock of the given lock type was acquired. */ private boolean takeLock(LockType lockType, long timeout, TimeUnit unit) { try { if (LockType.READ == lockType) { return lock.readLock().tryLock(timeout, unit); } else if (LockType.WRITE == lockType) { return lock.writeLock().tryLock(timeout, unit); } } catch (InterruptedException e) { LOG.warn("Thread interrupted while trying to acquire {} lock", lockType, e); Thread.currentThread().interrupt(); } return false; }
3.68
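takeLock wraps ReentrantReadWriteLock's bounded tryLock and restores the interrupt flag when interrupted. A runnable sketch of the same pattern with plain JDK types:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TryLockDemo {
    public static void main(String[] args) {
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        try {
            // Bounded wait: fail gracefully instead of blocking forever.
            if (lock.readLock().tryLock(100, TimeUnit.MILLISECONDS)) {
                try {
                    System.out.println("read lock acquired");
                } finally {
                    lock.readLock().unlock();
                }
            } else {
                System.out.println("timed out waiting for read lock");
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag, as the original does.
            Thread.currentThread().interrupt();
        }
    }
}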
hadoop_MoveStep_getBandwidth
/** * Gets the disk bandwidth, that is, the MB/sec to be copied. We will max out * at this amount of throughput. This is useful to prevent too much I/O on the * datanode while the data node is in use. * @return the bandwidth in MB/sec. */ @Override public long getBandwidth() { return bandwidth; }
3.68
hbase_ServerManager_getVersion
/** * May return "0.0.0" when server is not online */ public String getVersion(ServerName serverName) { ServerMetrics serverMetrics = onlineServers.get(serverName); return serverMetrics != null ? serverMetrics.getVersion() : "0.0.0"; }
3.68
flink_WriterProperties_getPendingFileRecoverableSerializer
/** @return the serializer for the {@link InProgressFileWriter.PendingFileRecoverable}. */ public SimpleVersionedSerializer<InProgressFileWriter.PendingFileRecoverable> getPendingFileRecoverableSerializer() { return pendingFileRecoverableSerializer; }
3.68
flink_TieredStorageResourceRegistry_clearResourceFor
/** * Remove all resources for the given owner. * * @param owner identifier of the data that the resources correspond to. */ public void clearResourceFor(TieredStorageDataIdentifier owner) { List<TieredStorageResource> cleanersForOwner = registeredResources.remove(owner); if (cleanersForOwner != null) { cleanersForOwner.forEach(TieredStorageResource::release); } }
3.68
rocketmq-connect_ColumnDefinition_isOptional
/** * Indicates whether values in the column are optional. This is equivalent to calling: * <pre> * nullability() == Nullability.NULL || nullability() == Nullability.UNKNOWN * </pre> * * @return <code>true</code> if so; <code>false</code> otherwise */ public boolean isOptional() { return nullability == Nullability.NULL || nullability == Nullability.UNKNOWN; }
3.68
hbase_InfoServer_addPrivilegedServlet
/** * Adds a servlet in the server that only administrators can access. * @see HttpServer#addPrivilegedServlet(String, String, Class) */ public void addPrivilegedServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { this.httpServer.addPrivilegedServlet(name, pathSpec, clazz); }
3.68
rocketmq-connect_RetryWithToleranceOperator_execAndHandleError
/** * Execute a given operation multiple times (if needed), and tolerate certain exceptions. */ protected <V> V execAndHandleError(Operation<V> operation, Class<? extends Exception> tolerated) { try { V result = execAndRetry(operation); if (context.failed()) { markAsFailed(); errorMetricsGroup.recordSkipped(); } return result; } catch (Exception e) { errorMetricsGroup.recordFailure(); markAsFailed(); context.error(e); if (!tolerated.isAssignableFrom(e.getClass())) { throw new ConnectException("Unhandled exception in error handler", e); } if (!withinToleranceLimits()) { throw new ConnectException("Tolerance exceeded in error handler", e); } errorMetricsGroup.recordSkipped(); return null; } }
3.68
dubbo_MemorySafeLinkedBlockingQueue_setMaxFreeMemory
/** * set the max free memory. * * @param maxFreeMemory the max free memory */ public void setMaxFreeMemory(final int maxFreeMemory) { this.maxFreeMemory = maxFreeMemory; }
3.68
pulsar_PrecisePublishLimiter_tryReleaseConnectionThrottle
// If no rate limiter is exceeded, re-enable auto-read from the socket. private void tryReleaseConnectionThrottle() { RateLimiter currentTopicPublishRateLimiterOnMessage = topicPublishRateLimiterOnMessage; RateLimiter currentTopicPublishRateLimiterOnByte = topicPublishRateLimiterOnByte; if ((currentTopicPublishRateLimiterOnMessage != null && currentTopicPublishRateLimiterOnMessage.getAvailablePermits() <= 0) || (currentTopicPublishRateLimiterOnByte != null && currentTopicPublishRateLimiterOnByte.getAvailablePermits() <= 0)) { return; } this.rateLimitFunction.apply(); }
3.68
hbase_MutableRegionInfo_isSplit
/** Returns True if has been split and has daughters. */ @Override public boolean isSplit() { return this.split; }
3.68
hbase_TableInputFormatBase_setTableRecordReader
/** * Allows subclasses to set the {@link TableRecordReader} to provide other * {@link TableRecordReader} implementations. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; }
3.68
hudi_FlinkOptions_fromMap
/** * Creates a new configuration that is initialized with the options of the given map. */ public static Configuration fromMap(Map<String, String> map) { final Configuration configuration = new Configuration(); for (Map.Entry<String, String> entry : map.entrySet()) { configuration.setString(entry.getKey().trim(), entry.getValue()); } return configuration; }
3.68
hudi_HoodieFlinkWriteClient_preTxn
/** * Refresh the last transaction metadata; * should be called before the Driver starts a new transaction. */ public void preTxn(WriteOperationType operationType, HoodieTableMetaClient metaClient) { if (txnManager.isLockRequired() && config.needResolveWriteConflict(operationType)) { // refresh the meta client which is reused metaClient.reloadActiveTimeline(); this.lastCompletedTxnAndMetadata = TransactionUtils.getLastCompletedTxnInstantAndMetadata(metaClient); this.pendingInflightAndRequestedInstants = TransactionUtils.getInflightAndRequestedInstants(metaClient); } tableServiceClient.startAsyncArchiveService(this); }
3.68
hadoop_FederationStateStoreFacade_createRetryInstance
/** * Helper method to create instances of Object using the class name defined in * the configuration object. The instance is wrapped in a {@link RetryProxy} using * the specified {@link RetryPolicy}. * * @param conf the yarn configuration * @param configuredClassName the configuration provider key * @param defaultValue the default implementation for fallback * @param type the class for which a retry proxy is required * @param retryPolicy the policy for retrying method call failures * @param <T> The type of the instance. * @return a retry proxy for the specified interface */ public static <T> Object createRetryInstance(Configuration conf, String configuredClassName, String defaultValue, Class<T> type, RetryPolicy retryPolicy) { return RetryProxy.create(type, createInstance(conf, configuredClassName, defaultValue, type), retryPolicy); }
3.68
querydsl_SQLExpressions_varPop
/** * Returns the population variance of a set of numbers after discarding the nulls in this set. * * @param expr argument * @return var_pop(expr) */ public static <T extends Number> WindowOver<T> varPop(Expression<T> expr) { return new WindowOver<T>(expr.getType(), SQLOps.VARPOP, expr); }
3.68
hbase_HFileBlockIndex_getMidKeyMetadata
/** * Used when writing the root block index of a multi-level block index. Serializes additional * information that allows the mid-key to be identified efficiently. * @return a few serialized fields for finding the mid-key * @throws IOException if metadata for computing the mid-key could not be created */ @Override public byte[] getMidKeyMetadata() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE); DataOutputStream baosDos = new DataOutputStream(baos); long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1); if (totalNumSubEntries == 0) { throw new IOException("No leaf-level entries, mid-key unavailable"); } long midKeySubEntry = (totalNumSubEntries - 1) / 2; int midKeyEntry = getEntryBySubEntry(midKeySubEntry); baosDos.writeLong(blockOffsets.get(midKeyEntry)); baosDos.writeInt(onDiskDataSizes.get(midKeyEntry)); long numSubEntriesBefore = midKeyEntry > 0 ? numSubEntriesAt.get(midKeyEntry - 1) : 0; long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore; if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) { throw new IOException("Could not identify mid-key index within the " + "leaf-level block containing mid-key: out of range (" + subEntryWithinEntry + ", numSubEntriesBefore=" + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry + ")"); } baosDos.writeInt((int) subEntryWithinEntry); if (baosDos.size() != MID_KEY_METADATA_SIZE) { throw new IOException("Could not write mid-key metadata: size=" + baosDos.size() + ", correct size: " + MID_KEY_METADATA_SIZE); } // Close just to be good citizens, although this has no effect. baos.close(); return baos.toByteArray(); }
3.68
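The writes above imply the metadata is a fixed 16-byte record, a long block offset plus two ints (8 + 4 + 4, an inference from the calls, not a quoted constant). A standalone sketch of the same serialization:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class MidKeyMetadataDemo {
    // Inferred from the writeLong + two writeInt calls above.
    static final int MID_KEY_METADATA_SIZE = 8 + 4 + 4;

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE);
        DataOutputStream dos = new DataOutputStream(baos);
        dos.writeLong(123456L); // block offset of the entry containing the mid-key
        dos.writeInt(4096);     // on-disk size of that entry
        dos.writeInt(7);        // sub-entry index of the mid-key within the entry
        byte[] metadata = baos.toByteArray();
        System.out.println(metadata.length == MID_KEY_METADATA_SIZE); // true
    }
}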
hadoop_HSAuditLogger_start
/** * Adds the first key-val pair to the passed builder in the following format * key=value */ static void start(Keys key, String value, StringBuilder b) { b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); }
3.68
framework_VTwinColSelect_getNavigationSelectKey
/** * Get the key that selects an item in the table. By default it is the Enter * key but by overriding this you can change the key to whatever you want. * * @return the key that selects an item */ protected int getNavigationSelectKey() { return KeyCodes.KEY_ENTER; }
3.68
rocketmq-connect_DebeziumMongoDBConnector_getConnectorClass
/** * get connector class */ @Override public String getConnectorClass() { return DEFAULT_CONNECTOR; }
3.68
MagicPlugin_EntityController_onEntityDeath
/** * This death handler is for mobs and players alike */ @EventHandler(priority = EventPriority.LOWEST) public void onEntityDeath(EntityDeathEvent event) { Entity entity = event.getEntity(); boolean isPlayer = entity instanceof Player; if (isPlayer) { EntityDamageEvent.DamageCause cause = entity.getLastDamageCause() == null ? null : entity.getLastDamageCause().getCause(); controller.info("* Processing death of " + entity.getName() + " from " + cause + " with drops: " + event.getDrops().size(), 15); } Long spawnerId = CompatibilityLib.getEntityMetadataUtils().getLong(entity, MagicMetaKeys.AUTOMATION); if (spawnerId != null) { MagicBlock magicBlock = controller.getActiveAutomaton(spawnerId); if (magicBlock != null) { magicBlock.onSpawnDeath(); } } // Just don't ever clear player death drops, for real if (!isPlayer) { if (CompatibilityLib.getEntityMetadataUtils().getBoolean(entity, MagicMetaKeys.NO_DROPS)) { event.setDroppedExp(0); event.getDrops().clear(); } else { UndoList pendingUndo = controller.getEntityUndo(entity); if (pendingUndo != null && pendingUndo.isUndoType(entity.getType())) { event.getDrops().clear(); } } } else { // Clean up metadata that shouldn't be on players CompatibilityLib.getEntityMetadataUtils().remove(entity, MagicMetaKeys.NO_DROPS); } EntityDamageEvent damageEvent = event.getEntity().getLastDamageCause(); if (damageEvent instanceof EntityDamageByEntityEvent) { EntityDamageByEntityEvent dbe = (EntityDamageByEntityEvent)damageEvent; Entity damager = dbe.getDamager(); damager = controller.getDamageSource(damager); if (damager != null) { Mage damagerMage = controller.getRegisteredMage(damager); if (damagerMage != null) { damagerMage.trigger("kill"); } } } com.elmakers.mine.bukkit.magic.Mage mage = controller.getRegisteredMage(entity); if (mage == null) return; mage.deactivateAllSpells(); mage.onDeath(event); if (isPlayer) { controller.info("* Mage class handled death, drops now: " + event.getDrops().size(), 15); } if (event instanceof PlayerDeathEvent) { PlayerDeathEvent playerDeath = (PlayerDeathEvent)event; handlePlayerDeath(playerDeath.getEntity(), mage, playerDeath.getDrops(), playerDeath.getKeepInventory()); } }
3.68
dubbo_EnvironmentAdapter_getExtraAttributes
/** * 1. OS Environment: DUBBO_LABELS=tag=pre;key=value * 2. JVM Options: -Denv_keys = DUBBO_KEY1, DUBBO_KEY2 * * @param params information of this Dubbo process, currently includes application name and host address. */ @Override public Map<String, String> getExtraAttributes(Map<String, String> params) { Map<String, String> parameters = new HashMap<>(); String rawLabels = ConfigurationUtils.getProperty(applicationModel, DUBBO_LABELS); if (StringUtils.isNotEmpty(rawLabels)) { String[] labelPairs = SEMICOLON_SPLIT_PATTERN.split(rawLabels); for (String pair : labelPairs) { String[] label = EQUAL_SPLIT_PATTERN.split(pair); if (label.length == 2) { parameters.put(label[0], label[1]); } } } String rawKeys = ConfigurationUtils.getProperty(applicationModel, DUBBO_ENV_KEYS); if (StringUtils.isNotEmpty(rawKeys)) { String[] keys = COMMA_SPLIT_PATTERN.split(rawKeys); for (String key : keys) { String value = ConfigurationUtils.getProperty(applicationModel, key); if (value != null) { // since 3.2 parameters.put(key.toLowerCase(), value); // upper-case key kept for compatibility parameters.put(key, value); } } } return parameters; }
3.68
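The DUBBO_LABELS parsing splits on semicolons and then on equals signs, silently dropping malformed pairs. A self-contained sketch of that parse with plain JDK regex (class and pattern names are stand-ins for the Dubbo constants):

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class LabelParsingDemo {
    private static final Pattern SEMICOLON_SPLIT_PATTERN = Pattern.compile(";");
    private static final Pattern EQUAL_SPLIT_PATTERN = Pattern.compile("=");

    public static void main(String[] args) {
        // Same shape as the documented OS environment value.
        String rawLabels = "tag=pre;key=value;malformed";
        Map<String, String> parameters = new HashMap<>();
        for (String pair : SEMICOLON_SPLIT_PATTERN.split(rawLabels)) {
            String[] label = EQUAL_SPLIT_PATTERN.split(pair);
            if (label.length == 2) { // malformed pairs are silently skipped
                parameters.put(label[0], label[1]);
            }
        }
        System.out.println(parameters); // e.g. {key=value, tag=pre}
    }
}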
framework_NativeButtonClick_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { final Label label1 = new Label("0,0"); final Label label2 = new Label("0,0"); Button button1 = new NativeButton("Button1", event -> label1 .setValue(event.getClientX() + "," + event.getClientY())); Button button2 = new NativeButton("Button2", event -> label2 .setValue(event.getClientX() + "," + event.getClientY())); HorizontalLayout layout = new HorizontalLayout(); layout.addComponents(button1, button2, label1, label2); layout.setSpacing(true); addComponent(layout); }
3.68
morf_AbstractSqlDialectTest_testUseImplicitJoinOrderOnMerge
/** * Check that we don't allow the use of the join order hint with a MERGE. */ @Test(expected = IllegalArgumentException.class) public void testUseImplicitJoinOrderOnMerge() { testDialect.convertStatementToSQL( merge() .into(tableRef("a")) .from( select() .from(tableRef("b")) .useImplicitJoinOrder() ) .tableUniqueKey(field("id")) ); }
3.68
pulsar_PersistentAcknowledgmentsGroupingTracker_isDuplicate
/** * Since the acks are delayed, we need to do some best-effort duplicate check to discard messages that are being * resent after a disconnection and for which the user has already sent an acknowledgement. */ @Override public boolean isDuplicate(MessageId messageId) { if (!(messageId instanceof MessageIdAdv)) { throw new IllegalArgumentException("isDuplicate cannot accept " + messageId.getClass().getName() + ": " + messageId); } final MessageIdAdv messageIdAdv = (MessageIdAdv) messageId; if (lastCumulativeAck.compareTo(messageIdAdv) >= 0) { // Already included in a cumulative ack return true; } else { // If "batchIndexAckEnabled" is false, the batched messages acknowledgment will be traced by // pendingIndividualAcks. So no matter what type the message ID is, check with "pendingIndividualAcks" // first. MessageIdAdv key = MessageIdAdvUtils.discardBatch(messageIdAdv); if (pendingIndividualAcks.contains(key)) { return true; } if (messageIdAdv.getBatchIndex() >= 0) { ConcurrentBitSetRecyclable bitSet = pendingIndividualBatchIndexAcks.get(key); return bitSet != null && !bitSet.get(messageIdAdv.getBatchIndex()); } return false; } }
3.68
hibernate-validator_ValidationProviderHelper_getValidatorFactoryBeanClass
/** * Determines the class of the {@link ValidatorFactory} corresponding to the given configuration object. */ Class<? extends ValidatorFactory> getValidatorFactoryBeanClass() { return validatorFactoryClass; }
3.68
hbase_HFileCorruptionChecker_getFailureMobFiles
/** Returns the set of check failure mob file paths after checkTables is called. */ public Collection<Path> getFailureMobFiles() { return new HashSet<>(failureMobFiles); }
3.68
hbase_MasterObserver_preUpdateRSGroupConfig
/** * Called before updating rsgroup config. * @param ctx the environment to interact with the framework and master * @param groupName the group name * @param configuration new configuration of the group name to be set */ default void preUpdateRSGroupConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx, final String groupName, final Map<String, String> configuration) throws IOException { }
3.68
hbase_ZKUtil_setData
/** Returns a setData ZKUtilOp */ public static ZKUtilOp setData(String path, byte[] data, int version) { return new SetData(path, data, version); }
3.68
flink_KubernetesCheckpointStoreUtil_checkpointIDToName
/** * Convert a checkpoint id into a ConfigMap key. * * @param checkpointId to convert to the key * @return key created from the given checkpoint id */ @Override public String checkpointIDToName(long checkpointId) { return CHECKPOINT_ID_KEY_PREFIX + String.format("%019d", checkpointId); }
3.68
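String.format("%019d", id) zero-pads the checkpoint id to 19 digits, the width of Long.MAX_VALUE, so the ConfigMap keys sort lexicographically in checkpoint order. A sketch, with the prefix value assumed for illustration:

public class CheckpointNameDemo {
    // Illustrative prefix; the real constant lives in Flink.
    static final String CHECKPOINT_ID_KEY_PREFIX = "checkpointID-";

    public static void main(String[] args) {
        System.out.println(CHECKPOINT_ID_KEY_PREFIX + String.format("%019d", 42L));
        // checkpointID-0000000000000000042
        // Zero-padding keeps lexicographic order equal to numeric order:
        // ...0000000000000000042 < ...0000000000000000100
    }
}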
morf_ConnectionResourcesBean_getFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming
/** * @see ConnectionResources#getFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming() */ @Override public Integer getFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming() { return fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming; }
3.68
framework_Navigator_getCurrentNavigationState
/** * Returns the current navigation state for which the * {@link #getCurrentView()} has been constructed. This may differ from * {@link #getState()} in case the URL has been changed in the browser and * the navigator hasn't yet been given an opportunity to construct a new view. The * state is in the form of * <code>current-view-name/optional/parameters</code> * * @return the current navigation state, may be {@code null}. */ public String getCurrentNavigationState() { return currentNavigationState; }
3.68
hudi_MercifulJsonConverter_getFieldTypeProcessors
/** * Build type processor map for each avro type. */ private static Map<Schema.Type, JsonToAvroFieldProcessor> getFieldTypeProcessors() { return Collections.unmodifiableMap(new HashMap<Schema.Type, JsonToAvroFieldProcessor>() { { put(Type.STRING, generateStringTypeHandler()); put(Type.BOOLEAN, generateBooleanTypeHandler()); put(Type.DOUBLE, generateDoubleTypeHandler()); put(Type.FLOAT, generateFloatTypeHandler()); put(Type.INT, generateIntTypeHandler()); put(Type.LONG, generateLongTypeHandler()); put(Type.ARRAY, generateArrayTypeHandler()); put(Type.RECORD, generateRecordTypeHandler()); put(Type.ENUM, generateEnumTypeHandler()); put(Type.MAP, generateMapTypeHandler()); put(Type.BYTES, generateBytesTypeHandler()); put(Type.FIXED, generateFixedTypeHandler()); } }); }
3.68
hbase_ServerName_parseServerName
/** * Parse a ServerName from a string * @param str Either an instance of {@link #toString()} or a "'&lt;hostname&gt;' ':' * '&lt;port&gt;'". * @return A ServerName instance. */ public static ServerName parseServerName(final String str) { return SERVERNAME_PATTERN.matcher(str).matches() ? valueOf(str) : valueOf(str, NON_STARTCODE); }
3.68
flink_SqlGatewayRestAPIVersion_fromURIToVersion
/** * Converts a URI to a SqlGatewayRestAPIVersion. If parsing fails, returns the default version. * * @return SqlGatewayRestAPIVersion */ public static SqlGatewayRestAPIVersion fromURIToVersion(String uri) { int slashIndex = uri.indexOf('/', 1); if (slashIndex < 0) { slashIndex = uri.length(); } try { return valueOf(uri.substring(1, slashIndex).toUpperCase()); } catch (Exception e) { return getDefaultVersion(); } }
3.68
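The version is read from the first path segment of the URI, upper-cased, and matched against the enum, with any failure mapped to the default. A self-contained sketch with a stand-in enum:

public class UriVersionDemo {
    enum Version { V1, V2 }

    static Version fromUri(String uri, Version fallback) {
        int slashIndex = uri.indexOf('/', 1); // end of the first path segment
        if (slashIndex < 0) {
            slashIndex = uri.length();
        }
        try {
            return Version.valueOf(uri.substring(1, slashIndex).toUpperCase());
        } catch (Exception e) {
            return fallback; // unknown segment falls back to the default
        }
    }

    public static void main(String[] args) {
        System.out.println(fromUri("/v1/sessions", Version.V2)); // V1
        System.out.println(fromUri("/info", Version.V2));        // V2 (fallback)
    }
}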
flink_BloomFilter_toBytes
/** Serializes to bytes; note that only heap memory is currently supported. */ public static byte[] toBytes(BloomFilter filter) { byte[] data = filter.bitSet.toBytes(); int byteSize = data.length; byte[] bytes = new byte[8 + byteSize]; UNSAFE.putInt(bytes, BYTE_ARRAY_BASE_OFFSET, filter.numHashFunctions); UNSAFE.putInt(bytes, BYTE_ARRAY_BASE_OFFSET + 4, byteSize); UNSAFE.copyMemory( data, BYTE_ARRAY_BASE_OFFSET, bytes, BYTE_ARRAY_BASE_OFFSET + 8, byteSize); return bytes; }
3.68
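toBytes writes an 8-byte header (numHashFunctions, then the bitset length) followed by the bitset bytes. A sketch of the same framing with ByteBuffer instead of Unsafe; ByteBuffer is big-endian while Unsafe uses native byte order, so this mirrors the layout, not necessarily the exact bytes:

import java.nio.ByteBuffer;

public class BloomFilterLayoutDemo {
    static byte[] serialize(int numHashFunctions, byte[] bitSetBytes) {
        ByteBuffer buf = ByteBuffer.allocate(8 + bitSetBytes.length);
        buf.putInt(numHashFunctions);   // bytes 0-3
        buf.putInt(bitSetBytes.length); // bytes 4-7
        buf.put(bitSetBytes);           // payload
        return buf.array();
    }

    public static void main(String[] args) {
        byte[] bytes = serialize(3, new byte[]{(byte) 0xFF, 0x00});
        System.out.println(bytes.length); // 10 = 8-byte header + 2-byte bitset
    }
}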
pulsar_PulsarRecordCursor_getSchemaInfo
/** * Get the schemaInfo of the message. * * 1. If the schema type of pulsarSplit is NONE or BYTES, use the BYTES schema. * 2. If the schema type of pulsarSplit is BYTEBUFFER, use the BYTEBUFFER schema. * 3. If the schema version of the message is null, use the schema info of pulsarSplit. * 4. If the schema version of the message is not null, get the specific version schema by PulsarAdmin. * 5. If the final schema is null throw a runtime exception. */ private SchemaInfo getSchemaInfo(PulsarSplit pulsarSplit) { SchemaInfo schemaInfo = getBytesSchemaInfo(pulsarSplit.getSchemaType(), pulsarSplit.getSchemaName()); if (schemaInfo != null) { return schemaInfo; } try { if (this.currentMessage.getSchemaVersion() == null || this.currentMessage.getSchemaVersion().length == 0) { schemaInfo = pulsarSplit.getSchemaInfo(); } else { schemaInfo = schemaInfoProvider.getSchemaByVersion(this.currentMessage.getSchemaVersion()).get(); } } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } if (schemaInfo == null) { String schemaVersion = this.currentMessage.getSchemaVersion() == null ? "null" : BytesSchemaVersion.of(this.currentMessage.getSchemaVersion()).toString(); throw new RuntimeException("The specific version (" + schemaVersion + ") schema of the table " + pulsarSplit.getTableName() + " is null"); } return schemaInfo; }
3.68
hbase_StoreFileInfo_getHDFSBlockDistribution
/** Returns the HDFS block distribution */ public HDFSBlocksDistribution getHDFSBlockDistribution() { return this.hdfsBlocksDistribution; }
3.68
hbase_IPCUtil_isFatalConnectionException
/** Returns True if the exception is a fatal connection exception. */ static boolean isFatalConnectionException(final ExceptionResponse e) { return e.getExceptionClassName().equals(FatalConnectionException.class.getName()); }
3.68
hbase_SimplePositionedMutableByteRange_setLength
/** * Update the length of this range. {@code offset + length} should not be greater than * {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets * {@code position} to {@code length}. * @param length The new length of this range. * @return this. */ @Override public PositionedByteRange setLength(int length) { this.position = Math.min(position, length); super.setLength(length); return this; }
3.68
framework_MessageSender_setClientToServerMessageId
/** * Used internally to update what the server expects. * * @param nextExpectedId * the new client id to set * @param force * true if the id must be updated, false otherwise */ public void setClientToServerMessageId(int nextExpectedId, boolean force) { if (nextExpectedId == clientToServerMessageId) { // No op as everything matches the way it should return; } if (force) { getLogger().info( "Forced update of clientId to " + nextExpectedId); clientToServerMessageId = nextExpectedId; return; } if (nextExpectedId > clientToServerMessageId) { if (clientToServerMessageId == 0) { // We have never sent a message to the server, so likely the // server knows better (typical case is that we refreshed a // @PreserveOnRefresh UI) getLogger().info("Updating client-to-server id to " + nextExpectedId + " based on server"); } else { getLogger().warning( "Server expects next client-to-server id to be " + nextExpectedId + " but we were going to use " + clientToServerMessageId + ". Will use " + nextExpectedId + "."); } clientToServerMessageId = nextExpectedId; } else { // Server has not yet seen all our messages // Do nothing as they will arrive eventually } }
3.68
zxing_HighLevelEncoder_encode
/** * @return text represented by this encoder encoded as a {@link BitArray} */ public BitArray encode() { State initialState = State.INITIAL_STATE; if (charset != null) { CharacterSetECI eci = CharacterSetECI.getCharacterSetECI(charset); if (null == eci) { throw new IllegalArgumentException("No ECI code for character set " + charset); } initialState = initialState.appendFLGn(eci.getValue()); } Collection<State> states = Collections.singletonList(initialState); for (int index = 0; index < text.length; index++) { int pairCode; int nextChar = index + 1 < text.length ? text[index + 1] : 0; switch (text[index]) { case '\r': pairCode = nextChar == '\n' ? 2 : 0; break; case '.' : pairCode = nextChar == ' ' ? 3 : 0; break; case ',' : pairCode = nextChar == ' ' ? 4 : 0; break; case ':' : pairCode = nextChar == ' ' ? 5 : 0; break; default: pairCode = 0; } if (pairCode > 0) { // We have one of the four special PUNCT pairs. Treat them specially. // Get a new set of states for the two new characters. states = updateStateListForPair(states, index, pairCode); index++; } else { // Get a new set of states for the new character. states = updateStateListForChar(states, index); } } // We are left with a set of states. Find the shortest one. State minState = Collections.min(states, new Comparator<State>() { @Override public int compare(State a, State b) { return a.getBitCount() - b.getBitCount(); } }); // Convert it to a bit array, and return. return minState.toBitArray(text); }
3.68
querydsl_SQLExpressions_datetrunc
/** * Truncate the given datetime expression * * @param unit datepart to truncate to * @param expr truncated datetime */ public static <D extends Comparable> DateTimeExpression<D> datetrunc(DatePart unit, DateTimeExpression<D> expr) { return Expressions.dateTimeOperation(expr.getType(), DATE_TRUNC_OPS.get(unit), expr); }
3.68
framework_JSR356WebsocketInitializer_init
/** * Initializes Atmosphere for use with Vaadin servlets found in the given * context. * <p> * For JSR 356 websockets to work properly, the initialization must be done * in the servlet context initialization phase. * * @param servletContext * The servlet context */ public void init(ServletContext servletContext) { if (!atmosphereAvailable) { return; } Map<String, ? extends ServletRegistration> regs = servletContext .getServletRegistrations(); for (String servletName : regs.keySet()) { ServletRegistration servletRegistration = regs.get(servletName); if (isVaadinServlet(servletRegistration, servletContext)) { try { initAtmosphereForVaadinServlet(servletRegistration, servletContext); } catch (Exception e) { getLogger().log(Level.WARNING, "Failed to initialize Atmosphere for " + servletName, e); } } } }
3.68
framework_StringToIntegerConverter_getFormat
/** * Returns the format used by * {@link #convertToPresentation(Integer, Class, Locale)} and * {@link #convertToModel(String, Class, Locale)}. * * @param locale * The locale to use * @return A NumberFormat instance */ @Override protected NumberFormat getFormat(Locale locale) { if (locale == null) { locale = Locale.getDefault(); } return NumberFormat.getIntegerInstance(locale); }
3.68
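NumberFormat.getIntegerInstance returns a locale-aware, integer-only format, which is what the converter's presentation/model round trip goes through. A quick demonstration:

import java.text.NumberFormat;
import java.text.ParseException;
import java.util.Locale;

public class IntegerFormatDemo {
    public static void main(String[] args) throws ParseException {
        NumberFormat format = NumberFormat.getIntegerInstance(Locale.GERMANY);
        System.out.println(format.format(1234567));    // 1.234.567 (German grouping)
        System.out.println(format.parse("1.234.567")); // 1234567
    }
}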
hbase_MiniHBaseCluster_compact
/** * Call compact on all regions of the specified table. */ public void compact(TableName tableName, boolean major) throws IOException { for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) { for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) { if (r.getTableDescriptor().getTableName().equals(tableName)) { if (RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) { r.compact(major); } } } } }
3.68
hadoop_NodePlan_addStep
/** * Adds a step to the existing Plan. * * @param nextStep the step to add */ void addStep(Step nextStep) { Preconditions.checkNotNull(nextStep); volumeSetPlans.add(nextStep); }
3.68
hbase_DataBlockEncoding_getId
/** Returns The id of a data block encoder. */ public short getId() { return id; }
3.68
hbase_TableIntegrityErrorHandlerImpl_setTableInfo
/** * {@inheritDoc} */ @Override public void setTableInfo(HbckTableInfo ti2) { this.ti = ti2; }
3.68
framework_BootstrapHandler_resolveFrontendUrl
/** * Resolves the URL to use for the {@literal frontend://} protocol. * * @param session * the session of the user to resolve the protocol for * @return the URL that frontend:// resolves to, possibly using another * internal protocol * @since 8.1 */ public static String resolveFrontendUrl(VaadinSession session) { DeploymentConfiguration configuration = session.getConfiguration(); String frontendUrl; if (session.getBrowser().isEs6Supported()) { frontendUrl = configuration.getApplicationOrSystemProperty( ApplicationConstants.FRONTEND_URL_ES6, ApplicationConstants.FRONTEND_URL_ES6_DEFAULT_VALUE); } else { frontendUrl = configuration.getApplicationOrSystemProperty( ApplicationConstants.FRONTEND_URL_ES5, ApplicationConstants.FRONTEND_URL_ES5_DEFAULT_VALUE); } if (!frontendUrl.endsWith("/")) { frontendUrl += "/"; } return frontendUrl; }
3.68
flink_RocksDBStateBackend_getCheckpointBackend
/** * Gets the state backend that this RocksDB state backend uses to persist its bytes to. * * <p>This RocksDB state backend only implements the RocksDB specific parts, it relies on the * 'CheckpointBackend' to persist the checkpoint and savepoint bytes streams. */ public StateBackend getCheckpointBackend() { return checkpointStreamBackend; }
3.68
hbase_ColumnPaginationFilter_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the filter that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) { return true; } if (!(o instanceof ColumnPaginationFilter)) { return false; } ColumnPaginationFilter other = (ColumnPaginationFilter) o; if (this.columnOffset != null) { return this.getLimit() == other.getLimit() && Bytes.equals(this.getColumnOffset(), other.getColumnOffset()); } return this.getLimit() == other.getLimit() && this.getOffset() == other.getOffset(); }
3.68
hadoop_FederationStateStoreFacade_storeNewMasterKey
/** * The Router supports storing a new master key ({@link RouterMasterKey}). * * @param newKey Key used for generating and verifying delegation tokens * @throws YarnException if the call to the state store is unsuccessful * @throws IOException An IO Error occurred * @return RouterMasterKeyResponse */ public RouterMasterKeyResponse storeNewMasterKey(DelegationKey newKey) throws YarnException, IOException { LOG.info("Storing master key with keyID {}.", newKey.getKeyId()); ByteBuffer keyBytes = ByteBuffer.wrap(newKey.getEncodedKey()); RouterMasterKey masterKey = RouterMasterKey.newInstance(newKey.getKeyId(), keyBytes, newKey.getExpiryDate()); RouterMasterKeyRequest keyRequest = RouterMasterKeyRequest.newInstance(masterKey); return stateStore.storeNewMasterKey(keyRequest); }
3.68
flink_StreamGraphGenerator_getParentInputIds
/** * Returns a list of lists containing the ids of the nodes in the transformation graph that * correspond to the provided transformations. Each transformation may have multiple nodes. * * <p>Parent transformations will be translated if they are not already translated. * * @param parentTransformations the transformations whose node ids to return. * @return the nodeIds per transformation or an empty list if the {@code parentTransformations} * are empty. */ private List<Collection<Integer>> getParentInputIds( @Nullable final Collection<Transformation<?>> parentTransformations) { final List<Collection<Integer>> allInputIds = new ArrayList<>(); if (parentTransformations == null) { return allInputIds; } for (Transformation<?> transformation : parentTransformations) { allInputIds.add(transform(transformation)); } return allInputIds; }
3.68
hbase_ChunkCreator_putbackChunks
/** * Add the chunk back to the pool; once the pool reaches its max size, the remaining * chunks are skipped */ private void putbackChunks(Chunk c) { int toAdd = this.maxCount - reclaimedChunks.size(); if (c.isFromPool() && c.size == chunkSize && toAdd > 0) { reclaimedChunks.add(c); } else { // remove the chunk (that is not going back to the pool), // whether or not it originally came from the pool ChunkCreator.this.removeChunk(c.getId()); } }
3.68
open-banking-gateway_HbciRedirectExecutor_redirect
/** * Redirects PSU to some page (or emits FinTech redirection required) by performing interpolation of the * string returned by {@code uiScreenUriSpel} * @param execution Execution context of the current process * @param context Current HBCI context * @param uiScreenUriSpel UI screen SpEL expression to interpolate * @param destinationUri URL where UI screen should redirect user to if he clicks OK (i.e. to ASPSP redirection * where user must click OK button in order to be redirected to ASPSP) * @param eventFactory Allows to construct custom event with redirection parameters. */ public void redirect( DelegateExecution execution, HbciContext context, String uiScreenUriSpel, String destinationUri, Function<Redirect.RedirectBuilder, ? extends Redirect> eventFactory ) { setDestinationUriInContext(execution, destinationUri); URI screenUri = ContextUtil.buildAndExpandQueryParameters(uiScreenUriSpel, context); Redirect.RedirectBuilder redirect = Redirect.builder(); redirect.processId(execution.getRootProcessInstanceId()); redirect.executionId(execution.getId()); redirect.redirectUri(screenUri); setUiUriInContext(execution, screenUri); applicationEventPublisher.publishEvent(eventFactory.apply(redirect)); }
3.68
hbase_ReplicationSourceLogQueue_enqueueLog
/** * Enqueue the wal * @param wal wal to be enqueued * @param walGroupId Key for the wal in the {@code queues} map * @return true if a queue already existed for this walGroupId, false if this is the first time we are seeing it. */ public boolean enqueueLog(Path wal, String walGroupId) { boolean exists = false; PriorityBlockingQueue<Path> queue = queues.get(walGroupId); if (queue == null) { queue = new PriorityBlockingQueue<>(queueSizePerGroup, new AbstractFSWALProvider.WALStartTimeComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); queues.put(walGroupId, queue); } else { exists = true; queue.put(wal); } // Increment size of logQueue this.metrics.incrSizeOfLogQueue(); // Compute oldest wal age this.metrics.setOldestWalAge(getOldestWalAge()); // This will log a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { LOG.warn( "{} WAL group {} queue size: {} exceeds value of " + "replication.source.log.queue.warn {}", source.logPeerId(), walGroupId, queueSize, logQueueWarnThreshold); } return exists; }
3.68
rocketmq-connect_WorkerSourceTask_sendRecord
/** * Send list of sourceDataEntries to MQ. */ private Boolean sendRecord() throws InterruptedException { int processed = 0; final CalcSourceRecordWrite counter = new CalcSourceRecordWrite(toSendRecord.size(), sourceTaskMetricsGroup); for (ConnectRecord preTransformRecord : toSendRecord) { retryWithToleranceOperator.sourceRecord(preTransformRecord); ConnectRecord record = transformChain.doTransforms(preTransformRecord); String topic = maybeCreateAndGetTopic(record); Message sourceMessage = convertTransformedRecord(topic, record); if (sourceMessage == null || retryWithToleranceOperator.failed()) { // commit record recordFailed(preTransformRecord); counter.skipRecord(); continue; } log.trace("{} Appending record to the topic {} , value {}", this, topic, record.getData()); /**prepare to send record*/ Optional<RecordOffsetManagement.SubmittedPosition> submittedRecordPosition = prepareToSendRecord(preTransformRecord); try { SendCallback callback = new SendCallback() { @Override public void onSuccess(SendResult result) { log.info("Successful send message to RocketMQ:{}, Topic {}", result.getMsgId(), result.getMessageQueue().getTopic()); // complete record counter.completeRecord(); // commit record for custom recordSent(preTransformRecord, sourceMessage, result); // ack record position submittedRecordPosition.ifPresent(RecordOffsetManagement.SubmittedPosition::ack); } @Override public void onException(Throwable throwable) { log.error("Source task send record failed ,error msg {}. message {}", throwable.getMessage(), JSON.toJSONString(sourceMessage), throwable); // skip record counter.skipRecord(); // record send failed recordSendFailed(false, sourceMessage, preTransformRecord, throwable); } }; if (StringUtils.isEmpty(sourceMessage.getKeys())) { // Round robin producer.send(sourceMessage, callback); } else { // Partition message ordering, // At the same time, ensure that the data is pulled in an orderly manner, which needs to be guaranteed by sourceTask in the business producer.send(sourceMessage, new SelectMessageQueueByHash(), sourceMessage.getKeys(), callback); } } catch (RetriableException e) { log.warn("{} Failed to send record to topic '{}'. Backing off before retrying: ", this, sourceMessage.getTopic(), e); // Intercepted as successfully sent, used to continue sending next time toSendRecord = toSendRecord.subList(processed, toSendRecord.size()); // remove pre submit position, for retry submittedRecordPosition.ifPresent(RecordOffsetManagement.SubmittedPosition::remove); // retry metrics counter.retryRemaining(); return false; } catch (InterruptedException e) { log.error("Send message InterruptedException. message: {}, error info: {}.", sourceMessage, e); // throw e and stop task throw e; } catch (Exception e) { log.error("Send message MQClientException. message: {}, error info: {}.", sourceMessage, e); recordSendFailed(true, sourceMessage, preTransformRecord, e); } processed++; } toSendRecord = null; return true; }
3.68
zxing_PDF417_setCompact
/** * @param compact if true, enables compaction */ public void setCompact(boolean compact) { this.compact = compact; }
3.68
flink_PhysicalSlotRequestBulkCheckerImpl_checkPhysicalSlotRequestBulkTimeout
/** * Check the slot request bulk and timeout its requests if it has been unfulfillable for too * long. * * @param slotRequestBulk bulk of slot requests * @param slotRequestTimeout indicates how long a pending request can be unfulfillable * @return result of the check, indicating the bulk is fulfilled, still pending, or timed out */ @VisibleForTesting TimeoutCheckResult checkPhysicalSlotRequestBulkTimeout( final PhysicalSlotRequestBulkWithTimestamp slotRequestBulk, final Time slotRequestTimeout) { if (slotRequestBulk.getPendingRequests().isEmpty()) { return TimeoutCheckResult.FULFILLED; } final boolean fulfillable = isSlotRequestBulkFulfillable(slotRequestBulk, slotsRetriever); if (fulfillable) { slotRequestBulk.markFulfillable(); } else { final long currentTimestamp = clock.relativeTimeMillis(); slotRequestBulk.markUnfulfillable(currentTimestamp); final long unfulfillableSince = slotRequestBulk.getUnfulfillableSince(); if (unfulfillableSince + slotRequestTimeout.toMilliseconds() <= currentTimestamp) { return TimeoutCheckResult.TIMEOUT; } } return TimeoutCheckResult.PENDING; }
3.68
flink_OperationExpressionsUtils_extractName
/** * Extracts name from given expression if it has one. Expressions that have names are: * * <ul> * <li>{@link FieldReferenceExpression} * <li>{@link TableReferenceExpression} * <li>{@link LocalReferenceExpression} * <li>{@link BuiltInFunctionDefinitions#AS} * </ul> * * @param expression expression to extract name from * @return optional name of given expression */ public static Optional<String> extractName(Expression expression) { return expression.accept(extractNameVisitor); }
3.68
streampipes_PipelineApi_start
/** * Starts a pipeline by given id * * @param pipeline The pipeline * @return {@link org.apache.streampipes.model.pipeline.PipelineOperationStatus} the status message after invocation */ @Override public PipelineOperationStatus start(Pipeline pipeline) { return start(pipeline.getPipelineId()); }
3.68
flink_FlinkImageBuilder_setBaseImage
/** * Sets base image. * * @param baseImage The base image. * @return A reference to this Builder. */ public FlinkImageBuilder setBaseImage(String baseImage) { this.baseImage = baseImage; return this; }
3.68
framework_SerializerHelper_resolveClass
/** * Resolves the class given by {@code className}. * * @param className * The fully qualified class name. * @return A {@code Class} reference. * @throws ClassNotFoundException * If the class could not be resolved. */ public static Class<?> resolveClass(String className) throws ClassNotFoundException { for (Class<?> c : PRIMITIVE_CLASSES) { if (className.equals(c.getName())) { return c; } } return Class.forName(className); }
3.68
flink_EnvironmentInformation_getGitCommitIdAbbrev
/** @return The last known abbreviated commit id of this version of the software. */ public static String getGitCommitIdAbbrev() { return getVersionsInstance().gitCommitIdAbbrev; }
3.68
pulsar_ObjectMapperFactory_getThreadLocalYaml
/** * This method is deprecated. Use {@link #getYamlMapper()} and {@link MapperReference#getObjectMapper()} */ @Deprecated public static ObjectMapper getThreadLocalYaml() { return getYamlMapper().getObjectMapper(); }
3.68
framework_VScrollTable_updateFirstVisibleRow
// Updates first visible row for the case we cannot wait // for onScroll private void updateFirstVisibleRow() { scrollTop = scrollBodyPanel.getScrollPosition(); firstRowInViewPort = calcFirstRowInViewPort(); int maxFirstRow = totalRows - pageLength; if (firstRowInViewPort > maxFirstRow && maxFirstRow >= 0) { firstRowInViewPort = maxFirstRow; } lastRequestedFirstvisible = firstRowInViewPort; client.updateVariable(paintableId, "firstvisible", firstRowInViewPort, false); }
3.68
flink_PrioritizedOperatorSubtaskState_resolvePrioritizedAlternatives
/** * This helper method resolves the dependencies between the ground truth of the operator * state obtained from the job manager and potential alternatives for recovery, e.g. from a * task-local source. */ <T extends StateObject> List<StateObjectCollection<T>> resolvePrioritizedAlternatives( StateObjectCollection<T> jobManagerState, List<StateObjectCollection<T>> alternativesByPriority, BiFunction<T, T, Boolean> approveFun) { // Nothing to resolve if there are no alternatives, or the ground truth has already no // state, or if we can assume that a rescaling happened because we find more than one // handle in the JM state // (this is more a sanity check). if (alternativesByPriority == null || alternativesByPriority.isEmpty() || !jobManagerState.hasState() || jobManagerState.size() != 1) { return Collections.singletonList(jobManagerState); } // As we know size is == 1 T reference = jobManagerState.iterator().next(); // This will contain the end result, we initialize it with the potential max. size. List<StateObjectCollection<T>> approved = new ArrayList<>(1 + alternativesByPriority.size()); for (StateObjectCollection<T> alternative : alternativesByPriority) { // We found an alternative to the JM state if it has state, we have a 1:1 // relationship, and the approve-function signaled true. if (alternative != null && alternative.hasState() && alternative.size() == 1 && BooleanUtils.isTrue( approveFun.apply(reference, alternative.iterator().next()))) { approved.add(alternative); } } // Of course we include the ground truth as last alternative. approved.add(jobManagerState); return Collections.unmodifiableList(approved); }
3.68
framework_VScrollTable_updateFirstVisibleAndScrollIfNeeded
/** For internal use only. May be removed or replaced in the future. */ public void updateFirstVisibleAndScrollIfNeeded(UIDL uidl) { firstvisible = uidl.hasVariable("firstvisible") ? uidl.getIntVariable("firstvisible") : 0; firstvisibleOnLastPage = uidl.hasVariable("firstvisibleonlastpage") ? uidl.getIntVariable("firstvisibleonlastpage") : -1; if (firstvisible != lastRequestedFirstvisible && scrollBody != null) { // Update lastRequestedFirstvisible right away here // (don't rely on update in the timer which could be cancelled). lastRequestedFirstvisible = firstRowInViewPort; // Only scroll if the first visible changes from the server side. // Else we might unintentionally scroll even when the scroll // position has not changed. enableLazyScroller(); } }
3.68
hudi_ClusteringUtils_getRequestedReplaceMetadata
/** * Get requested replace metadata from timeline. * @param metaClient meta client for the Hoodie table * @param pendingReplaceInstant the pending replacecommit instant * @return requested replace metadata, if present * @throws IOException if the instant details cannot be read */ private static Option<HoodieRequestedReplaceMetadata> getRequestedReplaceMetadata(HoodieTableMetaClient metaClient, HoodieInstant pendingReplaceInstant) throws IOException { final HoodieInstant requestedInstant; if (!pendingReplaceInstant.isRequested()) { // inflight replacecommit files don't have clustering plan. // This is because replacecommit inflight can have workload profile for 'insert_overwrite'. // Get the plan from corresponding requested instant. requestedInstant = HoodieTimeline.getReplaceCommitRequestedInstant(pendingReplaceInstant.getTimestamp()); } else { requestedInstant = pendingReplaceInstant; } Option<byte[]> content = metaClient.getActiveTimeline().getInstantDetails(requestedInstant); if (!content.isPresent() || content.get().length == 0) { // few operations create requested file without any content. Assume these are not clustering return Option.empty(); } return Option.of(TimelineMetadataUtils.deserializeRequestedReplaceMetadata(content.get())); }
3.68
hadoop_AzureADAuthenticator_getHttpErrorCode
/** * Gets Http error status code. * @return http error code. */ public int getHttpErrorCode() { return this.httpErrorCode; }
3.68
flink_RpcEndpoint_getHostname
/** * Gets the hostname of the underlying RPC endpoint. * * @return Hostname on which the RPC endpoint is running */ @Override public String getHostname() { return rpcServer.getHostname(); }
3.68
hbase_SnapshotInfo_isSnapshotCorrupted
/** Returns true if the snapshot is corrupted */ public boolean isSnapshotCorrupted() { return hfilesMissing.get() > 0 || logsMissing.get() > 0 || hfilesCorrupted.get() > 0; }
3.68
hadoop_RemoteMethod_getMethod
/** * Get the represented java method. * * @return {@link Method} * @throws IOException If the method cannot be found. */ public Method getMethod() throws IOException { try { if (types != null) { return protocol.getDeclaredMethod(methodName, types); } else { return protocol.getDeclaredMethod(methodName); } } catch (NoSuchMethodException e) { // Re-throw as an IOException LOG.error("Cannot get method {} with types {} from {}", methodName, Arrays.toString(types), protocol.getSimpleName(), e); throw new IOException(e); } catch (SecurityException e) { LOG.error("Cannot access method {} with types {} from {}", methodName, Arrays.toString(types), protocol.getSimpleName(), e); throw new IOException(e); } }
3.68
hudi_HoodieROTablePathFilter_safeGetParentsParent
/** * Obtain the path three levels up from the provided path. * * @return said path if available, null otherwise */ private Path safeGetParentsParent(Path path) { if (path.getParent() != null && path.getParent().getParent() != null && path.getParent().getParent().getParent() != null) { return path.getParent().getParent().getParent(); } return null; }
3.68
streampipes_StatementUtils_getStatement
/** * This method checks whether the user input is correct; a non-null Statement is returned for valid input. * * @param s the raw statement string * @return the parsed Statement, or null if the input is invalid */ public static Statement getStatement(String s) { Statement result = new Statement(); String[] parts = s.split(";"); // default case if (parts.length == 2) { if (parts[0].equals("*")) { result.setOperator(parts[0]); result.setLabel(parts[1]); return result; } else { return null; } } // all other valid cases if (parts.length == 3) { if (parts[0].equals(">") || parts[0].equals("<") || parts[0].equals("=")) { result.setOperator(parts[0]); } else { return null; } if (isNumeric(parts[1].replaceAll("-", ""))) { result.setValue(Double.parseDouble(parts[1])); } else { return null; } result.setLabel(parts[2]); return result; } else { return null; } }
3.68
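The accepted inputs are therefore "*;<label>" and "<op>;<value>;<label>" with op one of >, < or =. A standalone sketch replaying the same branching on a few inputs (using plain strings instead of the Statement class):

public class StatementParsingDemo {
    public static void main(String[] args) {
        for (String input : new String[]{"*;high", ">;10.5;high", "?;10;x"}) {
            String[] parts = input.split(";");
            if (parts.length == 2 && parts[0].equals("*")) {
                System.out.println("wildcard -> label=" + parts[1]);
            } else if (parts.length == 3
                    && (parts[0].equals(">") || parts[0].equals("<") || parts[0].equals("="))) {
                // getStatement also strips '-' before its numeric check
                double value = Double.parseDouble(parts[1]);
                System.out.println(parts[0] + " " + value + " -> label=" + parts[2]);
            } else {
                System.out.println("invalid: " + input); // getStatement returns null here
            }
        }
    }
}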
AreaShop_GithubUpdateCheck_getAuthor
/** * Get the author that this update checker is checking. * @return Used author */ public String getAuthor() { return author; }
3.68
pulsar_AbstractMetrics_populateDimensionMap
/** * Helper to populate the ledgers-by-dimension map. * * @param ledgersByDimensionMap map from metrics dimension to its ledgers * @param metrics the dimension key * @param ledger the ledger to add */ protected void populateDimensionMap(Map<Metrics, List<ManagedLedgerImpl>> ledgersByDimensionMap, Metrics metrics, ManagedLedgerImpl ledger) { ledgersByDimensionMap.computeIfAbsent(metrics, __ -> new ArrayList<>()).add(ledger); }
3.68
hbase_SyncTable_compareCellKeysWithinRow
/** * Compare families, qualifiers, and timestamps of the given Cells. They are assumed to be of * the same row. Nulls are after non-nulls. */ private int compareCellKeysWithinRow(Cell c1, Cell c2) { if (c1 == null) { return 1; // source missing cell } if (c2 == null) { return -1; // target missing cell } int result = CellComparator.getInstance().compareFamilies(c1, c2); if (result != 0) { return result; } result = CellComparator.getInstance().compareQualifiers(c1, c2); if (result != 0) { return result; } if (this.ignoreTimestamp) { return 0; } else { // note timestamp comparison is inverted - more recent cells first return CellComparator.getInstance().compareTimestamps(c1, c2); } }
3.68
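compareCellKeysWithinRow sorts nulls after non-nulls, then compares family and qualifier, and finally timestamps in inverted order so more recent cells come first. A minimal sketch of the same ordering over a simple stand-in key (hypothetical types, not HBase Cells):

public class CellKeyCompareDemo {
    // Stand-in for a cell key: {family, qualifier} plus a timestamp.
    static int compare(String[] c1, long ts1, String[] c2, long ts2) {
        if (c1 == null) {
            return 1;  // nulls sort after non-nulls (source missing cell)
        }
        if (c2 == null) {
            return -1; // target missing cell
        }
        int result = c1[0].compareTo(c2[0]); // family
        if (result != 0) {
            return result;
        }
        result = c1[1].compareTo(c2[1]);     // qualifier
        if (result != 0) {
            return result;
        }
        return Long.compare(ts2, ts1);       // inverted: newer timestamps first
    }

    public static void main(String[] args) {
        String[] key = {"f", "q"};
        System.out.println(compare(key, 200L, key, 100L)); // negative: newer cell sorts first
        System.out.println(compare(null, 0L, key, 0L));    // 1: null after non-null
    }
}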