Dataset columns:
name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (values 3.26 to 3.68)
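Each row below pairs a fully qualified method name with its Java source snippet and a quality score. As a rough, hypothetical illustration of that schema (the class, field names, and score threshold are assumptions for illustration, not part of the dataset), a minimal Java sketch that models one row and filters a small in-memory sample by score might look like this:

import java.util.List;

// Hypothetical model of one dataset row: method name, code snippet, quality score.
record CodeSnippetRow(String name, String codeSnippet, double score) {}

public class DatasetPreviewSketch {
    public static void main(String[] args) {
        // Tiny in-memory sample mirroring the preview rows below (snippets elided).
        List<CodeSnippetRow> rows = List.of(
                new CodeSnippetRow("hadoop_ActiveAuditManagerS3A_createSpan", "...", 3.68),
                new CodeSnippetRow("flink_Dispatcher_getJobMasterGateway", "...", 3.68));
        // Keep only rows at or above a chosen score threshold and print their names.
        rows.stream()
                .filter(row -> row.score() >= 3.5)
                .forEach(row -> System.out.println(row.name()));
    }
}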
hadoop_ActiveAuditManagerS3A_createSpan
/** * Start an operation; as well as invoking the audit * service to do this, sets the operation as the * active operation for this thread. * @param operation operation name. * @param path1 first path of operation * @param path2 second path of operation * @return a wrapped audit span * @throws IOException failure */ @Override public AuditSpanS3A createSpan(final String operation, @Nullable final String path1, @Nullable final String path2) throws IOException { // must be started Preconditions.checkState(isInState(STATE.STARTED), "Audit Manager %s is in wrong state: %s", this, getServiceState()); ioStatisticsStore.incrementCounter( Statistic.AUDIT_SPAN_CREATION.getSymbol()); return setActiveThreadSpan(auditor.createSpan( operation, path1, path2)); }
3.68
aws-saas-boost_KeycloakApi_toKeycloakUserNonExtensible
/* * This is not a preferred implementation, since Keycloak upgrades can lead to invisible bugs where * user attributes are silently dropped during normal system-user-service operation because they were * not properly set during upgrade operations. */ private static UserRepresentation toKeycloakUserNonExtensible(Map<String, Object> user) { UserRepresentation keycloakUser = null; if (user != null) { keycloakUser = new UserRepresentation(); keycloakUser.setId((String) user.get("id")); keycloakUser.setCreatedTimestamp((Long) user.get("createdTimestamp")); keycloakUser.setEnabled((Boolean) user.get("enabled")); keycloakUser.setUsername((String) user.get("username")); keycloakUser.setFirstName((String) user.get("firstName")); keycloakUser.setLastName((String) user.get("lastName")); keycloakUser.setEmail((String) user.get("email")); keycloakUser.setEmailVerified((Boolean) user.get("emailVerified")); keycloakUser.setRequiredActions((List<String>) user.get("requiredActions")); keycloakUser.setDisableableCredentialTypes((Set<String>) user.get("disableableCredentialTypes")); keycloakUser.setNotBefore((Integer) user.get("notBefore")); keycloakUser.setAccess((Map<String, Boolean>) user.get("access")); } return keycloakUser; }
3.68
flink_Dispatcher_getJobMasterGateway
/** Ensures that the JobMasterGateway is available. */ private CompletableFuture<JobMasterGateway> getJobMasterGateway(JobID jobId) { if (!jobManagerRunnerRegistry.isRegistered(jobId)) { return FutureUtils.completedExceptionally(new FlinkJobNotFoundException(jobId)); } final JobManagerRunner job = jobManagerRunnerRegistry.get(jobId); if (!job.isInitialized()) { return FutureUtils.completedExceptionally( new UnavailableDispatcherOperationException( "Unable to get JobMasterGateway for initializing job. " + "The requested operation is not available while the JobManager is initializing.")); } return job.getJobMasterGateway(); }
3.68
framework_AbstractComponentConnector_delegateCaptionHandling
/* * (non-Javadoc) * * @see com.vaadin.client.ComponentConnector#delegateCaptionHandling () */ @Override public boolean delegateCaptionHandling() { return true; }
3.68
rocketmq-connect_ConnectUtil_currentOffsets
/** Get consumer group offset */ public static Map<MessageQueue, Long> currentOffsets(WorkerConfig config, String groupName, List<String> topics, Set<MessageQueue> messageQueues) { // Get consumer group offset DefaultMQAdminExt adminClient = null; try { adminClient = startMQAdminTool(config); Map<MessageQueue, OffsetWrapper> consumerOffsets = Maps.newConcurrentMap(); for (String topic : topics) { ConsumeStats consumeStats = adminClient.examineConsumeStats(groupName, topic); consumerOffsets.putAll(consumeStats.getOffsetTable()); } return consumerOffsets.keySet().stream() .filter(messageQueue -> messageQueues.contains(messageQueue)) .collect( Collectors.toMap( messageQueue -> messageQueue, messageQueue -> consumerOffsets.get(messageQueue).getConsumerOffset())); } catch (MQClientException | MQBrokerException | RemotingException | InterruptedException e) { if (e instanceof MQClientException) { if (((MQClientException) e).getResponseCode() == ResponseCode.TOPIC_NOT_EXIST) { return Collections.emptyMap(); } else { throw new RuntimeException(e); } } else { throw new RuntimeException(e); } } finally { if (adminClient != null) { adminClient.shutdown(); } } }
3.68
framework_HierarchicalDataCommunicator_expand
/** * Expands the given item at the given index. Calling this method will have * no effect if the item is already expanded. * * @param item * the item to expand * @param index * the index of the item */ public void expand(T item, Integer index) { doExpand(item, index, true); }
3.68
hibernate-validator_UUIDValidator_hasCorrectLetterCase
/** * Validates the letter case of the given character depending on the letter case parameter * * @param ch The letter to be tested * * @return {@code true} if the character is in the specified letter case or letter case is not specified */ private boolean hasCorrectLetterCase(char ch) { if ( letterCase == null ) { return true; } if ( letterCase == LetterCase.LOWER_CASE && !Character.isLowerCase( ch ) ) { return false; } return letterCase != LetterCase.UPPER_CASE || Character.isUpperCase( ch ); }
3.68
framework_DesignAttributeHandler_findSetterForAttribute
/** * Returns a setter that can be used for assigning the given design * attribute to the class * * @param clazz * the class that is scanned for setters * @param attribute * the design attribute to find setter for * @return the setter method or null if not found */ private static Method findSetterForAttribute(Class<?> clazz, String attribute) { resolveSupportedAttributes(clazz); return CACHE.get(clazz).getSetter(attribute); }
3.68
mutate-test-kata_CompanyFixed_findEmployeeById
/** * Finds an employee by their id * @param id the id of the employee to be found * @return the employee with the id passed as the parameter or null if no such employee exists */ public EmployeeFixed findEmployeeById(String id) { return this.employees.stream() .filter(e -> e.getId().equals(id)) .findFirst() .orElse(null); }
3.68
dubbo_ReferenceConfigBase_determineInterfaceClass
/** * Determine the interface of the proxy class * * @param generic the generic invocation flag * @param interfaceName the interface class name * @return the resolved interface class */ public static Class<?> determineInterfaceClass(String generic, String interfaceName) { return determineInterfaceClass(generic, interfaceName, ClassUtils.getClassLoader()); }
3.68
dubbo_ReflectUtils_getName
/** * get constructor name. * "()", "(java.lang.String,int)" * * @param c constructor. * @return name. */ public static String getName(final Constructor<?> c) { StringBuilder ret = new StringBuilder("("); Class<?>[] parameterTypes = c.getParameterTypes(); for (int i = 0; i < parameterTypes.length; i++) { if (i > 0) { ret.append(','); } ret.append(getName(parameterTypes[i])); } ret.append(')'); return ret.toString(); }
3.68
flink_AvgAggFunction_getValueExpression
/** If all input are nulls, count will be 0 and we will get null after the division. */ @Override public Expression getValueExpression() { Expression ifTrue = nullOf(getResultType()); Expression ifFalse = cast(div(sum, count), typeLiteral(getResultType())); return ifThenElse(equalTo(count, literal(0L)), ifTrue, ifFalse); }
3.68
flink_BaseHybridHashTable_ensureNumBuffersReturned
/** * This method makes sure that at least a certain number of memory segments is in the list of * free segments. Free memory can be in the list of free segments, or in the return-queue where * segments used to write behind are put. The number of segments that are in that return-queue, * but are actually reclaimable is tracked. This method makes sure at least a certain number of * buffers is reclaimed. * * @param minRequiredAvailable The minimum number of buffers that needs to be reclaimed. */ public void ensureNumBuffersReturned(final int minRequiredAvailable) { if (minRequiredAvailable > internalPool.freePages() + this.buildSpillRetBufferNumbers) { throw new IllegalArgumentException( "More buffers requested available than totally available."); } try { while (internalPool.freePages() < minRequiredAvailable) { returnPage(this.buildSpillReturnBuffers.take()); this.buildSpillRetBufferNumbers--; } } catch (InterruptedException iex) { throw new RuntimeException("Hash Join was interrupted."); } }
3.68
pulsar_BrokerService_updateDynamicServiceConfiguration
/** * Updates pulsar.ServiceConfiguration's dynamic field with value persistent into zk-dynamic path. It also validates * dynamic-value before updating it and throws {@code IllegalArgumentException} if validation fails */ private void updateDynamicServiceConfiguration() { Optional<Map<String, String>> configCache = Optional.empty(); try { configCache = pulsar().getPulsarResources().getDynamicConfigResources().getDynamicConfiguration(); // create dynamic-config if not exist. if (!configCache.isPresent()) { pulsar().getPulsarResources().getDynamicConfigResources() .setDynamicConfigurationWithCreate(n -> new HashMap<>()); } } catch (Exception e) { log.warn("Failed to read dynamic broker configuration", e); } configCache.ifPresent(stringStringMap -> stringStringMap.forEach((key, value) -> { // validate field if (dynamicConfigurationMap.containsKey(key) && dynamicConfigurationMap.get(key).validator != null) { if (!dynamicConfigurationMap.get(key).validator.test(value)) { log.error("Failed to validate dynamic config {} with value {}", key, value); throw new IllegalArgumentException( String.format("Failed to validate dynamic-config %s/%s", key, value)); } } // update field value try { Field field = ServiceConfiguration.class.getDeclaredField(key); if (field != null && field.isAnnotationPresent(FieldContext.class)) { field.setAccessible(true); field.set(pulsar().getConfiguration(), FieldParser.value(value, field)); log.info("Successfully updated {}/{}", key, value); } } catch (Exception e) { log.warn("Failed to update service configuration {}/{}, {}", key, value, e.getMessage()); } })); }
3.68
hbase_MetaTableLocator_setMetaLocation
/** * Sets the location of <code>hbase:meta</code> in ZooKeeper to the specified server address. * @param zookeeper reference to the {@link ZKWatcher} which also contains configuration and * operation * @param serverName the name of the server * @param replicaId the ID of the replica * @param state the state of the region * @throws KeeperException if a ZooKeeper operation fails */ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, int replicaId, RegionState.State state) throws KeeperException { if (serverName == null) { LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required"); return; } LOG.info("Setting hbase:meta replicaId={} location in ZooKeeper as {}, state={}", replicaId, serverName, state); // Make the MetaRegionServer pb and then get its bytes and save this as // the znode content. MetaRegionServer pbrsr = MetaRegionServer.newBuilder().setServer(ProtobufUtil.toServerName(serverName)) .setRpcVersion(HConstants.RPC_CURRENT_VERSION).setState(state.convert()).build(); byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); try { ZKUtil.setData(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); } catch (KeeperException.NoNodeException nne) { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { LOG.debug("hbase:meta region location doesn't exist, create it"); } else { LOG.debug( "hbase:meta region location doesn't exist for replicaId=" + replicaId + ", create it"); } ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); } }
3.68
flink_MemoryBackendCheckpointStorageAccess_getMaxStateSize
/** Gets the size (in bytes) that an individual chunk of state may have at most. */ public int getMaxStateSize() { return maxStateSize; }
3.68
hadoop_ManifestCommitter_getTaskManifestPath
/** * The path to where the manifest file of a task attempt will be * saved when the task is committed. * This path will be the same for all attempts of the same task. * @param context the context of the task attempt. * @return the path where a task attempt should be stored. */ @VisibleForTesting public Path getTaskManifestPath(TaskAttemptContext context) { final Path dir = enterCommitter(false, context).getTaskManifestDir(); return manifestPathForTask(dir, context.getTaskAttemptID().getTaskID().toString()); }
3.68
hadoop_FutureDataInputStreamBuilderImpl_initFromFS
/** * Initialize from a filesystem. */ private void initFromFS() { bufferSize = fileSystem.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT); }
3.68
framework_TableQuery_storeRowImmediately
/** * Inserts the given row in the database table immediately. Begins and * commits the transaction needed. This method was added specifically to * solve the problem of returning the final RowId immediately on the * SQLContainer.addItem() call when auto commit mode is enabled in the * SQLContainer. * * @param row * RowItem to add to the database * @return Final RowId of the added row * @throws SQLException */ public RowId storeRowImmediately(RowItem row) throws SQLException { beginTransaction(); /* Set version column, if one is provided */ setVersionColumnFlagInProperty(row); /* Generate query */ StatementHelper sh = sqlGenerator .generateInsertQuery(getFullTableName(), row); Connection connection = null; PreparedStatement pstmt = null; ResultSet generatedKeys = null; connection = getConnection(); try { pstmt = connection.prepareStatement(sh.getQueryString(), primaryKeyColumns.toArray(new String[0])); sh.setParameterValuesToStatement(pstmt); getLogger().log(Level.FINE, "DB -> {0}", sh.getQueryString()); int result = pstmt.executeUpdate(); RowId newId = null; if (result > 0) { /* * If affected rows exist, we'll get the new RowId, commit the * transaction and return the new RowId. */ generatedKeys = pstmt.getGeneratedKeys(); newId = getNewRowId(row, generatedKeys); } // transaction has to be closed in any case commit(); return newId; } finally { releaseConnection(connection, pstmt, generatedKeys); } }
3.68
shardingsphere-elasticjob_JobScheduleController_parseTimeZoneString
/** * Get the TimeZone for the time zone specification. * * @param timeZoneString must start with "GMT", such as "GMT+8:00" * @return the specified TimeZone, or the GMT zone if the `timeZoneString` cannot be understood. */ private TimeZone parseTimeZoneString(final String timeZoneString) { if (Strings.isNullOrEmpty(timeZoneString)) { return TimeZone.getDefault(); } Preconditions.checkArgument(timeZoneString.startsWith("GMT"), "Invalid time zone specification '%s'.", timeZoneString); return TimeZone.getTimeZone(timeZoneString); }
3.68
flink_DataStream_getOutput
/** * Returns an iterator over the collected elements. The returned iterator must only be used * once the job execution has been triggered. * * <p>This method will always return the same iterator instance. * * @return iterator over collected elements */ public CloseableIterator<T> getOutput() { // we intentionally fail here instead of waiting, because it indicates a // misunderstanding on the user's part and would usually just block the application Preconditions.checkNotNull(iterator, "The job execution was not yet started."); return iterator; }
3.68
graphhopper_GHMRequest_putHint
// a good trick to serialize unknown properties into the HintsMap @JsonAnySetter public GHMRequest putHint(String fieldName, Object value) { hints.putObject(fieldName, value); return this; }
3.68
hadoop_ClusterMetrics_getMapSlotCapacity
/** * Get the total number of map slots in the cluster. * * @return map slot capacity */ public int getMapSlotCapacity() { return totalMapSlots; }
3.68
morf_SelectStatement_allowParallelDml
/** * Request that this query can contribute towards a parallel DML execution plan. * If a select statement is used within a DML statement, some dialects require DML parallelisation to be enabled via the select statement. * If the database implementation does not support parallel query execution, or is configured to disable it, then this request will have no effect. * * <p>For queries that are likely to change a lot of data, a parallel execution plan may result in the results being written faster, although the exact effect depends on * the underlying database and the nature of the data.</p> * * <p>Note that the use cases of this are rare. Caution is needed because if multiple requests are made by the application to run parallel queries, the resulting resource contention may result in worse performance - this is not intended for queries that are submitted in parallel by the application.</p> * * @return this, for method chaining. * @see #withParallelQueryPlan() */ public SelectStatement allowParallelDml() { return copyOnWriteOrMutate( SelectStatementBuilder::allowParallelDml, () -> this.hints.add(AllowParallelDmlHint.INSTANCE) ); }
3.68
framework_SharedUtil_trimTrailingSlashes
/** * Trims trailing slashes (if any) from a string. * * @param value * The string value to be trimmed. Cannot be null. * @return String value without trailing slashes. */ public static String trimTrailingSlashes(String value) { return value.replaceAll("/*$", ""); }
3.68
framework_FileUploadHandler_getProgressEventInterval
/** * To prevent event storming, streaming progress events are sent in this * interval rather than every time the buffer is filled. This fixes #13155. * To adjust this value override the method, and register your own handler * in VaadinService.createRequestHandlers(). The default is 500ms, and * setting it to 0 effectively restores the old behavior. */ protected int getProgressEventInterval() { return DEFAULT_STREAMING_PROGRESS_EVENT_INTERVAL_MS; }
3.68
flink_RetryPredicates_createExceptionTypePredicate
/** * Creates a predicate on given exception type. * * @param exceptionClass * @return predicate on exception type. */ public static ExceptionTypePredicate createExceptionTypePredicate( @Nonnull Class<? extends Throwable> exceptionClass) { return new ExceptionTypePredicate(exceptionClass); }
3.68
pulsar_BrokerService_startProtocolHandlers
// This call is used for starting additional protocol handlers public void startProtocolHandlers( Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> protocolHandlers) { protocolHandlers.forEach((protocol, initializers) -> { initializers.forEach((address, initializer) -> { try { startProtocolHandler(protocol, address, initializer); } catch (IOException e) { log.error("{}", e.getMessage(), e.getCause()); throw new RuntimeException(e.getMessage(), e.getCause()); } }); }); }
3.68
hbase_MiniHBaseCluster_shutdown
/** * Shut down the mini HBase cluster */ @Override public void shutdown() throws IOException { if (this.hbaseCluster != null) { this.hbaseCluster.shutdown(); } }
3.68
hbase_VisibilityLabelsCache_getGroupAuthsAsOrdinals
/** * Returns the list of ordinals of labels associated with the groups * @return the list of ordinals */ public Set<Integer> getGroupAuthsAsOrdinals(String[] groups) { this.lock.readLock().lock(); try { Set<Integer> authOrdinals = new HashSet<>(); if (groups != null && groups.length > 0) { Set<Integer> groupAuthOrdinals = null; for (String group : groups) { groupAuthOrdinals = groupAuths.get(group); if (groupAuthOrdinals != null && !groupAuthOrdinals.isEmpty()) { authOrdinals.addAll(groupAuthOrdinals); } } } return (authOrdinals.isEmpty()) ? EMPTY_SET : authOrdinals; } finally { this.lock.readLock().unlock(); } }
3.68
flink_TableResult_getTableSchema
/** * Returns the schema of the result. * * @deprecated This method has been deprecated as part of FLIP-164. {@link TableSchema} has been * replaced by two more dedicated classes {@link Schema} and {@link ResolvedSchema}. Use * {@link Schema} for declaration in APIs. {@link ResolvedSchema} is offered by the * framework after resolution and validation. */ @Deprecated default TableSchema getTableSchema() { return TableSchema.fromResolvedSchema(getResolvedSchema()); }
3.68
framework_TabSheetElement_openTab
/** * Opens the given tab by clicking its caption text or icon. If the tab has * neither text caption nor icon, clicks at a fixed position. * * @param tabCell * The tab to be opened. */ private void openTab(WebElement tabCell) { // Open the tab by clicking its caption text if it exists. List<WebElement> tabCaptions = tabCell.findElements(byCaption); if (!tabCaptions.isEmpty()) { tabCaptions.get(0).click(); return; } // If no caption text was found, click the icon of the tab. List<WebElement> tabIcons = tabCell .findElements(By.className("v-icon")); if (!tabIcons.isEmpty()) { tabIcons.get(0).click(); return; } // If neither text nor icon caption was found, click at a position that // is unlikely to close the tab. if (BrowserUtil.isIE(getCapabilities())) { // old default, offset calculated from top left ((TestBenchElement) tabCell).click(10, 10); } else { // w3c compliant, offset calculated from middle ((TestBenchElement) tabCell).click(-5, 0); } }
3.68
framework_Upload_addFailedListener
/** * Adds the upload interrupted event listener. * * @param listener * the Listener to be added, not null * @since 8.0 */ public Registration addFailedListener(FailedListener listener) { return addListener(FailedEvent.class, listener, UPLOAD_FAILED_METHOD); }
3.68
hbase_MobFile_open
/** * Opens the underlying reader. It's not thread-safe. Use MobFileCache.openFile() instead. */ public void open() throws IOException { sf.initReader(); }
3.68
graphhopper_GraphHopper_postProcessing
/** * Runs both after the import and when loading an existing Graph * * @param closeEarly release resources as early as possible */ protected void postProcessing(boolean closeEarly) { initLocationIndex(); importPublicTransit(); if (closeEarly) { boolean includesCustomProfiles = profilesByName.values().stream().anyMatch(p -> CustomWeighting.NAME.equals(p.getWeighting())); if (!includesCustomProfiles) // when there are custom profiles we must not close way geometry or KVStorage, because // they might be needed to evaluate the custom weightings for the following preparations baseGraph.flushAndCloseGeometryAndNameStorage(); } if (lmPreparationHandler.isEnabled()) loadOrPrepareLM(closeEarly); if (closeEarly) // we needed the location index for the LM preparation, but we don't need it for CH locationIndex.close(); if (chPreparationHandler.isEnabled()) loadOrPrepareCH(closeEarly); }
3.68
hudi_BaseHoodieTableServiceClient_rollbackFailedWrites
/** * Rollback all failed writes. * * @return true if rollback was triggered. false otherwise. */ protected Boolean rollbackFailedWrites() { HoodieTable table = createTable(config, hadoopConf); List<String> instantsToRollback = getInstantsToRollback(table.getMetaClient(), config.getFailedWritesCleanPolicy(), Option.empty()); Map<String, Option<HoodiePendingRollbackInfo>> pendingRollbacks = getPendingRollbackInfos(table.getMetaClient()); instantsToRollback.forEach(entry -> pendingRollbacks.putIfAbsent(entry, Option.empty())); rollbackFailedWrites(pendingRollbacks); return !pendingRollbacks.isEmpty(); }
3.68
streampipes_TreeUtils_findByDomId
/** * @param id the DOM ID * @param graphs list of invocation graphs * @return an invocation graph with a given DOM Id */ public static InvocableStreamPipesEntity findByDomId(String id, List<InvocableStreamPipesEntity> graphs) { for (InvocableStreamPipesEntity graph : graphs) { if (graph.getDom().equals(id)) { return graph; } } //TODO return null; }
3.68
morf_DirectoryDataSet_clearDestination
/** * @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider#clearDestination() */ @Override public void clearDestination() { for (File file : directory.listFiles()) { // skip files/folders that start with . such as .svn if (file.getName().startsWith(".")) continue; deleteFileOrDirectory(file); } }
3.68
hibernate-validator_ConstrainedParameter_merge
/** * Creates a new constrained parameter object by merging this and the given * other parameter. * * @param other The parameter to merge. * * @return A merged parameter. */ public ConstrainedParameter merge(ConstrainedParameter other) { ConfigurationSource mergedSource = ConfigurationSource.max( source, other.source ); Set<MetaConstraint<?>> mergedConstraints = newHashSet( constraints ); mergedConstraints.addAll( other.constraints ); Set<MetaConstraint<?>> mergedTypeArgumentConstraints = new HashSet<>( typeArgumentConstraints ); mergedTypeArgumentConstraints.addAll( other.typeArgumentConstraints ); CascadingMetaDataBuilder mergedCascadingMetaData = cascadingMetaDataBuilder.merge( other.cascadingMetaDataBuilder ); return new ConstrainedParameter( mergedSource, callable, type, index, mergedConstraints, mergedTypeArgumentConstraints, mergedCascadingMetaData ); }
3.68
flink_FieldSet_toArray
/** * Transforms the field set into an array of field IDs. Whether the IDs are ordered or unordered * depends on the specific subclass of the field set. * * @return An array of all contained field IDs. */ public int[] toArray() { int[] a = new int[this.collection.size()]; int i = 0; for (int col : this.collection) { a[i++] = col; } return a; }
3.68
framework_ConnectorTypeWriter_write
/** * Writes a JSON object containing connector-ID-to-type-ID mappings for each * dirty Connector in the given UI. * * @param ui * The {@link UI} containing dirty connectors * @param writer * The {@link Writer} used to write the JSON. * @param target * The paint target containing the connector type IDs. * @throws IOException * If the serialization fails. */ public void write(UI ui, Writer writer, PaintTarget target) throws IOException { Collection<ClientConnector> dirtyVisibleConnectors = ui .getConnectorTracker().getDirtyVisibleConnectors(); JsonObject connectorTypes = Json.createObject(); for (ClientConnector connector : dirtyVisibleConnectors) { String connectorType = target.getTag(connector); try { connectorTypes.put(connector.getConnectorId(), connectorType); } catch (JsonException e) { throw new PaintException( "Failed to send connector type for connector " + connector.getConnectorId() + ": " + e.getMessage(), e); } } writer.write(JsonUtil.stringify(connectorTypes)); }
3.68
hadoop_PerGpuUtilizations_getOverallGpuUtilization
/** * Overall percent GPU utilization * @return utilization */ @XmlJavaTypeAdapter(PerGpuDeviceInformation.StrToFloatBeforeSpaceAdapter.class) @XmlElement(name = "gpu_util") public Float getOverallGpuUtilization() { return overallGpuUtilization; }
3.68
open-banking-gateway_ConsentAccessFactory_consentForAnonymousPsu
/** * Consent access for Anonymous PSU (does not require login to OBG)-ASPSP tuple. * @param fintech Fintech on whose behalf the consent is accessed * @param aspsp ASPSP(bank) that grants consent * @param session Service session for this consent * @return New consent access template */ public ConsentAccess consentForAnonymousPsu(Fintech fintech, Bank aspsp, ServiceSession session) { return new AnonymousPsuConsentAccess(aspsp, fintech, fintechPubKeys, psuEncryption, session, consentRepository, encServiceProvider, encryptionKeySerde); }
3.68
rocketmq-connect_StateManagementService_persist
/** * Persist all the configs in a store. */ default void persist() { }
3.68
hbase_RegionHDFSBlockLocationFinder_scheduleFullRefresh
/** * Refresh all the region locations. * @return true if user created regions got refreshed. */ private boolean scheduleFullRefresh() { ClusterInfoProvider service = this.provider; // Protect from anything being null while starting up. if (service == null) { return false; } // TODO: Should this refresh all the regions or only the ones assigned? boolean includesUserTables = false; for (final RegionInfo hri : service.getAssignedRegions()) { cache.refresh(hri); includesUserTables |= !hri.getTable().isSystemTable(); } return includesUserTables; }
3.68
framework_AbstractDateField_convertFromDateString
/** * Parses string representation of date range limit into date type. * * @param temporalStr * the string representation * @return parsed value * @see AbstractDateFieldState#rangeStart * @see AbstractDateFieldState#rangeEnd * @since 8.4 */ protected T convertFromDateString(String temporalStr) { if (temporalStr == null) { return null; } return toType(RANGE_FORMATTER.parse(temporalStr)); }
3.68
hadoop_Cluster_cancelDelegationToken
/** * Cancel a delegation token from the JobTracker * @param token the token to cancel * @throws IOException * @deprecated Use {@link Token#cancel} instead */ public void cancelDelegationToken(Token<DelegationTokenIdentifier> token ) throws IOException, InterruptedException { token.cancel(getConf()); }
3.68
pulsar_FunctionMetaDataManager_containsFunction
/** * Check if the function exists. * @param tenant tenant that the function belongs to * @param namespace namespace that the function belongs to * @param functionName name of function * @return true if function exists and false if it does not */ public synchronized boolean containsFunction(String tenant, String namespace, String functionName) { return containsFunctionMetaData(tenant, namespace, functionName); }
3.68
flink_AbstractCollectResultBuffer_next
/** Get next user visible result, returns null if currently there is no more. */ public T next() { if (userVisibleHead == userVisibleTail) { return null; } T ret = buffer.removeFirst(); userVisibleHead++; sanityCheck(); return ret; }
3.68
hbase_ThriftHBaseServiceHandler_addScanner
/** * Assigns a unique ID to the scanner and adds the mapping to an internal HashMap. * @param scanner to add * @return Id for this Scanner */ private int addScanner(ResultScanner scanner) { int id = nextScannerId.getAndIncrement(); scannerMap.put(id, scanner); return id; }
3.68
flink_TieredStorageProducerClient_writeAccumulatedBuffer
/** * Write the accumulated buffer of this subpartitionId to an appropriate tier. After the tier is * decided, the buffer will be written to the selected tier. * * <p>Note that the method only throws an exception when choosing a storage tier, so the caller * should ensure that the buffer is recycled when throwing an exception. * * @param subpartitionId the subpartition identifier * @param accumulatedBuffer one accumulated buffer of this subpartition */ private void writeAccumulatedBuffer( TieredStorageSubpartitionId subpartitionId, Buffer accumulatedBuffer) throws IOException { Buffer compressedBuffer = compressBufferIfPossible(accumulatedBuffer); if (currentSubpartitionTierAgent[subpartitionId.getSubpartitionId()] == null) { chooseStorageTierToStartSegment(subpartitionId); } if (!currentSubpartitionTierAgent[subpartitionId.getSubpartitionId()].tryWrite( subpartitionId, compressedBuffer, bufferAccumulator)) { chooseStorageTierToStartSegment(subpartitionId); checkState( currentSubpartitionTierAgent[subpartitionId.getSubpartitionId()].tryWrite( subpartitionId, compressedBuffer, bufferAccumulator), "Failed to write the first buffer to the new segment"); } }
3.68
hbase_MultiByteBuff_asSubByteBuffer
/** * Returns bytes from given offset till length specified, as a single ByteBuffer. When all these * bytes happen to be in a single ByteBuffer, which this object wraps, that ByteBuffer item as * such will be returned (with offset in this ByteBuffer where the bytes starts). So users are * warned not to change the position or limit of this returned ByteBuffer. When the required bytes * happen to span across multiple ByteBuffers, this API will copy the bytes to a newly created * ByteBuffer of required size and return that. * @param offset the offset in this MBB from where the subBuffer should be created * @param length the length of the subBuffer * @param pair a pair that will have the bytes from the current position till length specified, * as a single ByteBuffer and offset in that Buffer where the bytes starts. The * method would set the values on the pair that is passed in by the caller */ @Override public void asSubByteBuffer(int offset, int length, ObjectIntPair<ByteBuffer> pair) { checkRefCount(); if (this.itemBeginPos[this.curItemIndex] <= offset) { int relOffsetInCurItem = offset - this.itemBeginPos[this.curItemIndex]; if (this.curItem.limit() - relOffsetInCurItem >= length) { pair.setFirst(this.curItem); pair.setSecond(relOffsetInCurItem); return; } } int itemIndex = getItemIndex(offset); ByteBuffer item = this.items[itemIndex]; offset = offset - this.itemBeginPos[itemIndex]; if (item.limit() - offset >= length) { pair.setFirst(item); pair.setSecond(offset); return; } byte[] dst = new byte[length]; int destOffset = 0; while (length > 0) { int toRead = Math.min(length, item.limit() - offset); ByteBufferUtils.copyFromBufferToArray(dst, item, offset, destOffset, toRead); length -= toRead; if (length == 0) break; itemIndex++; item = this.items[itemIndex]; destOffset += toRead; offset = 0; } pair.setFirst(ByteBuffer.wrap(dst)); pair.setSecond(0); }
3.68
hadoop_RoleModel_add
/** * Add a single statement. * @param stat new statement. */ public void add(Statement stat) { statement.add(stat); }
3.68
hbase_FileSystemUtilizationChore_getRegionSizeStore
// visible for testing RegionSizeStore getRegionSizeStore() { return rs.getRegionServerSpaceQuotaManager().getRegionSizeStore(); }
3.68
flink_OptimizerNode_getBroadcastConnections
/** Return the list of inputs associated with broadcast variables for this node. */ public List<DagConnection> getBroadcastConnections() { return this.broadcastConnections; }
3.68
streampipes_DataStreamResourceManager_update
/** * Takes a data stream {@link SpDataStream} as an input updates it in the database */ public void update(SpDataStream dataStream) { db.updateElement(dataStream); }
3.68
hbase_KeyValueUtil_createLastOnRow
/** * Creates a KeyValue that is last on the specified row id. That is, every other possible KeyValue * for the given row would compareTo() less than the result of this call. * @param row row key * @return Last possible KeyValue on passed <code>row</code> */ public static KeyValue createLastOnRow(final byte[] row) { return new KeyValue(row, null, null, HConstants.LATEST_TIMESTAMP, KeyValue.Type.Minimum); }
3.68
morf_HumanReadableStatementProducer_produceFor
/** * Produces output via the supplied consumer. * * @param consumer the consumer to consume the events. */ public void produceFor(final HumanReadableStatementConsumer consumer) { // Ensure the upgrade steps are in the correct order final Collection<Class<? extends UpgradeStep>> upgradeSteps = upgradeGraph.orderedSteps(); //Create a Multimap which has version ordered keys but insertion ordered values ListMultimap<String, UpgradeStep> orderedUpgradeSteps = Multimaps.newListMultimap( Maps.<String, Collection<UpgradeStep>>newTreeMap(new TreeMap<String, Collection<UpgradeStep>>( new Comparator<String>() { @Override public int compare(String o1, String o2) { return versionCompare(o1, o2); } }) ), new Supplier<List<UpgradeStep>>() { @Override public List<UpgradeStep> get() { return Lists.newLinkedList(); } }); // Iterate over the upgrade steps initialising them, and reordering by version and then sequence for (Class<? extends UpgradeStep> currentStepClass : upgradeSteps) { try { // Create an instance of the upgrade step Constructor<? extends UpgradeStep> constructor = currentStepClass.getDeclaredConstructor(); constructor.setAccessible(true); UpgradeStep step = constructor.newInstance(); orderedUpgradeSteps.put(getUpgradeStepVersion(step), step); } catch (Exception e) { throw new IllegalStateException("Cannot instantiate upgrade step [" + currentStepClass.getName() + "]", e); } } // Create a proxy schema editor to pass through the consumer events SchemaEditor schemaEditor = new SchemaEditor() { /** @see org.alfasoftware.morf.upgrade.SchemaEditor#addColumn(java.lang.String, org.alfasoftware.morf.metadata.Column) **/ @Override public void addColumn(String tableName, Column definition, FieldLiteral columnDefault) { consumer.schemaChange(HumanReadableStatementHelper.generateAddColumnString(tableName, definition, columnDefault)); } /** * @see org.alfasoftware.morf.upgrade.SchemaEditor#addColumn(java.lang.String, org.alfasoftware.morf.metadata.Column) */ @Override public void addColumn(String tableName, Column definition) { consumer.schemaChange(HumanReadableStatementHelper.generateAddColumnString(tableName, definition)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#addIndex(java.lang.String, org.alfasoftware.morf.metadata.Index) **/ @Override public void addIndex(String tableName, Index index) { consumer.schemaChange(HumanReadableStatementHelper.generateAddIndexString(tableName, index)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#addTable(org.alfasoftware.morf.metadata.Table) **/ @Override public void addTable(Table definition) { consumer.schemaChange(HumanReadableStatementHelper.generateAddTableString(definition)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#changeColumn(java.lang.String, org.alfasoftware.morf.metadata.Column, org.alfasoftware.morf.metadata.Column) **/ @Override public void changeColumn(String tableName, Column fromDefinition, Column toDefinition) { consumer.schemaChange(HumanReadableStatementHelper.generateChangeColumnString(tableName, fromDefinition, toDefinition)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#changeIndex(java.lang.String, org.alfasoftware.morf.metadata.Index, org.alfasoftware.morf.metadata.Index) **/ @Override public void changeIndex(String tableName, Index fromIndex, Index toIndex) { consumer.schemaChange(HumanReadableStatementHelper.generateChangeIndexString(tableName, fromIndex, toIndex)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#removeColumn(java.lang.String, org.alfasoftware.morf.metadata.Column) **/ 
@Override public void removeColumn(String tableName, Column definition) { consumer.schemaChange(HumanReadableStatementHelper.generateRemoveColumnString(tableName, definition)); } /** * @see org.alfasoftware.morf.upgrade.SchemaEditor#removeColumns(java.lang.String, org.alfasoftware.morf.metadata.Column[]) */ @Override public void removeColumns(String tableName, Column... definitions) { for (Column definition : definitions) { removeColumn(tableName, definition); } } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#removeIndex(java.lang.String, org.alfasoftware.morf.metadata.Index) **/ @Override public void removeIndex(String tableName, Index index) { consumer.schemaChange(HumanReadableStatementHelper.generateRemoveIndexString(tableName, index)); } /** * @see org.alfasoftware.morf.upgrade.SchemaEditor#renameIndex(java.lang.String, java.lang.String, java.lang.String) */ @Override public void renameIndex(String tableName, String fromIndexName, String toIndexName) { consumer.schemaChange(HumanReadableStatementHelper.generateRenameIndexString(tableName, fromIndexName, toIndexName)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#removeTable(org.alfasoftware.morf.metadata.Table) **/ @Override public void removeTable(Table table) { consumer.schemaChange(HumanReadableStatementHelper.generateRemoveTableString(table)); } @Override public void renameTable(String fromTableName, String toTableName) { consumer.schemaChange(HumanReadableStatementHelper.generateRenameTableString(fromTableName, toTableName)); } @Override public void changePrimaryKeyColumns(String tableName, List<String> oldPrimaryKeyColumns, List<String> newPrimaryKeyColumns) { consumer.schemaChange(HumanReadableStatementHelper.generateChangePrimaryKeyColumnsString(tableName, oldPrimaryKeyColumns, newPrimaryKeyColumns)); } @Override public void correctPrimaryKeyColumns(String tableName, List<String> newPrimaryKeyColumns) { consumer.schemaChange(HumanReadableStatementHelper.generateChangePrimaryKeyColumnsString(tableName, newPrimaryKeyColumns)); } @Override public void addTableFrom(Table table, SelectStatement select) { consumer.schemaChange(HumanReadableStatementHelper.generateAddTableFromString(table, select)); } /** @see org.alfasoftware.morf.upgrade.SchemaEditor#analyseTable(String) **/ @Override public void analyseTable(String tableName) { consumer.schemaChange(HumanReadableStatementHelper.generateAnalyseTableFromString(tableName)); } }; //Similarly, we need a proxy DataEditor DataEditor dataEditor = new DataEditor () { @Override public void executeStatement(Statement statement) { if (reportDataChanges) { consumer.dataChange(HumanReadableStatementHelper.generateDataUpgradeString(statement, preferredSQLDialect)); } } }; //Iterate over versions, then over the ordered upgrade steps for (String version : orderedUpgradeSteps.keySet()) { consumer.versionStart("ALFA " + version); for (UpgradeStep currentStep : orderedUpgradeSteps.get(version)) { // Indicate to the consumer that the upgrade step has started consumer.upgradeStepStart(currentStep.getClass().getSimpleName(), currentStep.getDescription(), currentStep.getJiraId()); // Fire all the actual schema change events currentStep.execute(schemaEditor, dataEditor); // Indicate to the consumer that the upgrade step has ended consumer.upgradeStepEnd(currentStep.getClass().getSimpleName()); } consumer.versionEnd("ALFA " + version); } }
3.68
graphhopper_VectorTile_setType
/** * <pre> * The type of geometry stored in this feature. * </pre> * * <code>optional .vector_tile.Tile.GeomType type = 3 [default = UNKNOWN];</code> */ public Builder setType(vector_tile.VectorTile.Tile.GeomType value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; type_ = value.getNumber(); onChanged(); return this; }
3.68
hbase_RootProcedureState_release
/** * Called by the ProcedureExecutor to mark the procedure step as finished. */ protected synchronized void release(Procedure<TEnvironment> proc) { running--; }
3.68
flink_StreamProjection_projectTuple7
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public <T0, T1, T2, T3, T4, T5, T6> SingleOutputStreamOperator<Tuple7<T0, T1, T2, T3, T4, T5, T6>> projectTuple7() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>> tType = new TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject<IN, Tuple7<T0, T1, T2, T3, T4, T5, T6>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
framework_DateCellDayEvent_getMinTimeRange
/** * @return the minimum amount of ms that an event must last when resized */ private long getMinTimeRange() { return DateConstants.MINUTEINMILLIS * 30; }
3.68
hmily_DeleteStatementAssembler_assembleHmilyDeleteStatement
/** * Assemble Hmily delete statement. * * @param deleteStatement delete statement * @param hmilyDeleteStatement hmily delete statement * @return hmily delete statement */ public static HmilyDeleteStatement assembleHmilyDeleteStatement(final DeleteStatement deleteStatement, final HmilyDeleteStatement hmilyDeleteStatement) { return hmilyDeleteStatement; }
3.68
flink_SingleOutputStreamOperator_setUidHash
/** * Sets a user-provided hash for this operator. This will be used AS IS to create the * JobVertexID. * * <p>The user provided hash is an alternative to the generated hashes, which is considered when * identifying an operator through the default hash mechanics fails (e.g. because of changes * between Flink versions). * * <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting. * The provided hash needs to be unique per transformation and job. Otherwise, job submission * will fail. Furthermore, you cannot assign a user-specified hash to intermediate nodes in an * operator chain and trying to do so will make your job fail. * * <p>A use case for this is in migration between Flink versions or changing the jobs in a way * that changes the automatically generated hashes. In this case, providing the previous hashes * directly through this method (e.g. obtained from old logs) can help to reestablish a lost * mapping from states to their target operator. * * @param uidHash The user provided hash for this operator. This will become the JobVertexID, * which is shown in the logs and web ui. * @return The operator with the user provided hash. */ @PublicEvolving public SingleOutputStreamOperator<T> setUidHash(String uidHash) { transformation.setUidHash(uidHash); return this; }
3.68
framework_ComboBox_getFirstItemIndexOnCurrentPage
/** * Returns the index of the first item on the current page. The index is to * the underlying (possibly filtered) contents. The null item, if any, does * not have an index but takes up a slot on the first page. * * @param needNullSelectOption * true if a null option should be shown before any other options * (takes up the first slot on the first page, not counted in * index) * @return first item to show on the UI (index to the filtered list of * options, not taking the null item into consideration if any) */ private int getFirstItemIndexOnCurrentPage(boolean needNullSelectOption) { // Not all options are visible, find out which ones are on the // current "page". int first = currentPage * pageLength; if (needNullSelectOption && currentPage > 0) { first--; } return first; }
3.68
hbase_MasterRpcServices_switchSnapshotCleanup
/** * Turn on/off snapshot auto-cleanup based on TTL * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is * completed, if outstanding * @return previous snapshot auto-cleanup mode */ private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal, final boolean synchronous) throws IOException { final boolean oldValue = server.snapshotCleanupStateStore.get(); server.switchSnapshotCleanup(enabledNewVal, synchronous); LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(), enabledNewVal); return oldValue; }
3.68
flink_FromClasspathEntryClassInformationProvider_createFromClasspath
/** * Creates a {@code FromClasspathEntryClassInformationProvider} looking for the entry class * providing the main method on the passed classpath. * * @param classpath The classpath the job class is expected to be part of. * @return The {@code FromClasspathEntryClassInformationProvider} providing the job class found * on the passed classpath. * @throws IOException If some Jar listed on the classpath wasn't accessible. * @throws FlinkException Either no or too many main methods were found on the classpath. */ public static FromClasspathEntryClassInformationProvider createFromClasspath( Iterable<URL> classpath) throws IOException, FlinkException { return new FromClasspathEntryClassInformationProvider( extractJobClassFromUrlClasspath(classpath)); }
3.68
morf_DataValueLookup_getString
/** * Gets the value as a string. Intended to allow the record to be easily serialised. * Always succeeds, and uses the following conversions from the underlying type: * * <dl> * <dt>BigDecimal</dt><dd>Uses {@link BigDecimal#toPlainString()}</dd> * <dt>Byte Array</dt><dd>Converted to a MIME Base 64 string</dd> * <dt>Others</dt><dd>Use {@link Object#toString()}</dd> * </dl> * * @param name The column name. * @return The value. */ public default String getString(String name) { return getValue(name); }
3.68
hadoop_ClientMmap_close
/** * Close the ClientMmap object. */ @Override public void close() { if (replica != null) { if (anchored) { replica.removeNoChecksumAnchor(); } replica.unref(); } replica = null; }
3.68
hmily_DateUtils_parseLocalDateTime
/** * parseLocalDateTime. * output format: yyyy-MM-dd HH:mm:ss * * @param str date String * @return yyyy-MM-dd HH:mm:ss * @see LocalDateTime */ private static LocalDateTime parseLocalDateTime(final String str) { return LocalDateTime.parse(str, DateTimeFormatter.ofPattern(DATE_FORMAT_DATETIME)); }
3.68
flink_SqlFunctionUtils_initcap
/** SQL INITCAP(string) function. */ public static String initcap(String s) { // Assumes Alpha as [A-Za-z0-9] // white space is treated as everything else. final int len = s.length(); boolean start = true; final StringBuilder newS = new StringBuilder(); for (int i = 0; i < len; i++) { char curCh = s.charAt(i); final int c = (int) curCh; if (start) { // curCh is whitespace or first character of word. if (c > 47 && c < 58) { // 0-9 start = false; } else if (c > 64 && c < 91) { // A-Z start = false; } else if (c > 96 && c < 123) { // a-z start = false; curCh = (char) (c - 32); // Uppercase this character } // else {} whitespace } else { // Inside of a word or white space after end of word. if (c > 47 && c < 58) { // 0-9 // noop } else if (c > 64 && c < 91) { // A-Z curCh = (char) (c + 32); // Lowercase this character } else if (c > 96 && c < 123) { // a-z // noop } else { // whitespace start = true; } } newS.append(curCh); } // for each character in s return newS.toString(); }
3.68
hadoop_JsonSerialization_save
/** * Save to a Hadoop filesystem. * @param fs filesystem * @param path path * @param overwrite should any existing file be overwritten * @param instance instance * @throws IOException IO exception. */ public void save(FileSystem fs, Path path, T instance, boolean overwrite) throws IOException { writeJsonAsBytes(instance, fs.create(path, overwrite)); }
3.68
hbase_StructBuilder_reset
/** * Reset the sequence of accumulated fields. */ public StructBuilder reset() { fields.clear(); return this; }
3.68
hbase_ServerManager_getDrainingServersList
/** Returns A copy of the internal list of draining servers. */ public List<ServerName> getDrainingServersList() { return new ArrayList<>(this.drainingServers); }
3.68
framework_VTabsheetPanel_showWidget
/** * Shows the widget at the specified index. This causes the currently- * visible widget to be hidden. * * @param index * the index of the widget to be shown */ public void showWidget(int index) { checkIndexBoundsForAccess(index); Widget newVisible = getWidget(index); if (visibleWidget != newVisible) { if (visibleWidget != null) { hide(DOM.getParent(visibleWidget.getElement())); } visibleWidget = newVisible; touchScrollHandler .setElements(visibleWidget.getElement().getParentElement()); } // Always ensure the selected tab is visible. If server prevents a tab // change we might end up here with visibleWidget == newVisible but its // parent is still hidden. unHide(DOM.getParent(visibleWidget.getElement())); }
3.68
hbase_OrderedInt64_encodeLong
/** * Write instance {@code val} into buffer {@code dst}. * @param dst the {@link PositionedByteRange} to write to * @param val the value to write to {@code dst} * @return the number of bytes written */ public int encodeLong(PositionedByteRange dst, long val) { return OrderedBytes.encodeInt64(dst, val, order); }
3.68
flink_BufferFileChannelReader_readBufferFromFileChannel
/** * Reads data from the object's file channel into the given buffer. * * @param buffer the buffer to read into * @return whether the end of the file has been reached (<tt>true</tt>) or not (<tt>false</tt>) */ public boolean readBufferFromFileChannel(Buffer buffer) throws IOException { checkArgument(fileChannel.size() - fileChannel.position() > 0); // Read header header.clear(); fileChannel.read(header); header.flip(); final boolean isBuffer = header.getInt() == 1; final int size = header.getInt(); if (size > buffer.getMaxCapacity()) { throw new IllegalStateException( "Buffer is too small for data: " + buffer.getMaxCapacity() + " bytes available, but " + size + " needed. This is most likely due to an serialized event, which is larger than the buffer size."); } checkArgument(buffer.getSize() == 0, "Buffer not empty"); fileChannel.read(buffer.getNioBuffer(0, size)); buffer.setSize(size); buffer.setDataType(isBuffer ? Buffer.DataType.DATA_BUFFER : Buffer.DataType.EVENT_BUFFER); return fileChannel.size() - fileChannel.position() == 0; }
3.68
hbase_RemoteProcedureException_deserialize
/** * Takes a series of bytes and tries to generate a RemoteProcedureException instance for it. * @param bytes the bytes to generate the {@link RemoteProcedureException} from * @return the RemoteProcedureException instance * @throws IOException if there was a deserialization problem. */ public static RemoteProcedureException deserialize(byte[] bytes) throws IOException { return fromProto(ForeignExceptionMessage.parseFrom(bytes)); }
3.68
hadoop_AbfsInputStreamStatisticsImpl_seek
/** * Record a forward or backward seek, adding a seek operation, a forward or * a backward seek operation, and number of bytes skipped. * The seek direction will be calculated based on the parameters. * * @param seekTo seek to the position. * @param currentPos current position. */ @Override public void seek(long seekTo, long currentPos) { if (seekTo >= currentPos) { this.seekForwards(seekTo - currentPos); } else { this.seekBackwards(currentPos - seekTo); } }
3.68
hadoop_ResourceRequest_clone
/** * Clone a ResourceRequest object (shallow copy). Please keep it loaded with * all (new) fields * * @param rr the object to copy from * @return the copied object */ @Public @Evolving public static ResourceRequest clone(ResourceRequest rr) { // Please keep it loaded with all (new) fields return ResourceRequest.newBuilder().priority(rr.getPriority()) .resourceName(rr.getResourceName()).capability(rr.getCapability()) .numContainers(rr.getNumContainers()) .relaxLocality(rr.getRelaxLocality()) .nodeLabelExpression(rr.getNodeLabelExpression()) .executionTypeRequest(rr.getExecutionTypeRequest()) .allocationRequestId(rr.getAllocationRequestId()).build(); }
3.68
framework_PropertyDefinition_getTopLevelName
/** * Gets the top level name of this property. * * @return the top level property name, not <code>null</code> * @since 8.3 */ public default String getTopLevelName() { return getName(); }
3.68
morf_AbstractSqlDialectTest_testAddDays
/** * Test that AddDays functionality behaves as expected. */ @Test public void testAddDays() { String result = testDialect.getSqlFrom(addDays(field("testField"), new FieldLiteral(-20))); assertEquals(expectedAddDays(), result); }
3.68
hudi_DFSPropertiesConfiguration_addPropsFromStream
/** * Add properties from buffered reader. * * @param reader Buffered Reader * @throws IOException */ public void addPropsFromStream(BufferedReader reader, Path cfgFilePath) throws IOException { try { reader.lines().forEach(line -> { if (!isValidLine(line)) { return; } String[] split = splitProperty(line); if (line.startsWith("include=") || line.startsWith("include =")) { Path providedPath = new Path(split[1]); FileSystem providedFs = FSUtils.getFs(split[1], hadoopConfig); // In the case that only filename is provided, assume it's in the same directory. if ((!providedPath.isAbsolute() || StringUtils.isNullOrEmpty(providedFs.getScheme())) && cfgFilePath != null) { providedPath = new Path(cfgFilePath.getParent(), split[1]); } addPropsFromFile(providedPath); } else { hoodieConfig.setValue(split[0], split[1]); } }); } finally { reader.close(); } }
3.68
hadoop_BufferedIOStatisticsOutputStream_hasCapability
/** * If the inner stream supports {@link StreamCapabilities}, * forward the probe to it. * Otherwise: return false. * * @param capability string to query the stream support for. * @return true if a capability is known to be supported. */ @Override public boolean hasCapability(final String capability) { if (out instanceof StreamCapabilities) { return ((StreamCapabilities) out).hasCapability(capability); } else { return false; } }
3.68
hadoop_ClientThrottlingIntercept_sendingRequest
/** * Called before the Azure Storage SDK sends a request. Client-side throttling * uses this to suspend the request, if necessary, to minimize errors and * maximize throughput. * * @param event The connection, operation, and request state. */ public static void sendingRequest(SendingRequestEvent event) { BlobOperationDescriptor.OperationType operationType = BlobOperationDescriptor.getOperationType( (HttpURLConnection) event.getConnectionObject()); switch (operationType) { case GetBlob: singleton.readThrottler.suspendIfNecessary(); break; case AppendBlock: case PutBlock: case PutPage: singleton.writeThrottler.suspendIfNecessary(); break; default: break; } }
3.68
hudi_ExpressionPredicates_bindPredicates
/** * Binds predicates to create an OR predicate. * * @param predicates The disjunctive predicates. * @return An OR predicate. */ public Predicate bindPredicates(Predicate... predicates) { this.predicates = predicates; return this; }
3.68
open-banking-gateway_PsuEncryptionServiceProvider_forPublicKey
/** * Public key (write only) encryption. * @param keyId Key ID * @param key Public key * @return Encryption service for writing only */ public EncryptionService forPublicKey(UUID keyId, PublicKey key) { return oper.encryptionService(keyId.toString(), key); }
3.68
morf_AddIndex_getTableName
/** * @return the name of the table to add the index to. */ public String getTableName() { return tableName; }
3.68
hadoop_NamenodeStatusReport_getState
/** * Get the state of the Namenode being monitored. * * @return State of the Namenode. */ public FederationNamenodeServiceState getState() { if (!registrationValid) { return FederationNamenodeServiceState.UNAVAILABLE; } else if (haStateValid) { return FederationNamenodeServiceState.getState(status); } else { return FederationNamenodeServiceState.ACTIVE; } }
3.68
hbase_AsyncTable_toRow
/** * Specify a stop row * @param endKey select regions up to and including the region containing this row, exclusive. */ default CoprocessorServiceBuilder<S, R> toRow(byte[] endKey) { return toRow(endKey, false); }
3.68
flink_MailboxMetricsController_isLatencyMeasurementSetup
/** * Indicates if latency measurement has been setup. * * @return True if latency measurement has been setup. */ public boolean isLatencyMeasurementSetup() { return this.timerService != null && this.mailboxExecutor != null; }
3.68
hbase_DateTieredCompactionPolicy_getCompactionBoundariesForMinor
/** * Returns a list of boundaries for multiple compaction output from minTimestamp to maxTimestamp. */ private static List<Long> getCompactionBoundariesForMinor(CompactionWindow window, boolean singleOutput) { List<Long> boundaries = new ArrayList<>(); boundaries.add(Long.MIN_VALUE); if (!singleOutput) { boundaries.add(window.startMillis()); } return boundaries; }
3.68
hbase_StorageClusterStatusModel_getTotalStaticBloomSizeKB
/** Returns The total size of static bloom, in KB */ @XmlAttribute public int getTotalStaticBloomSizeKB() { return totalStaticBloomSizeKB; }
3.68
flink_ExceptionUtils_isJvmFatalError
/** * Checks whether the given exception indicates a situation that may leave the JVM in a * corrupted state, meaning a state where continued normal operation can only be guaranteed via * clean process restart. * * <p>Currently considered fatal exceptions are Virtual Machine errors indicating that the JVM * is corrupted, like {@link InternalError}, {@link UnknownError}, and {@link * java.util.zip.ZipError} (a special case of InternalError). The {@link ThreadDeath} exception * is also treated as a fatal error, because when a thread is forcefully stopped, there is a * high chance that parts of the system are in an inconsistent state. * * @param t The exception to check. * @return True, if the exception is considered fatal to the JVM, false otherwise. */ public static boolean isJvmFatalError(Throwable t) { return (t instanceof InternalError) || (t instanceof UnknownError) || (t instanceof ThreadDeath); }
3.68
streampipes_EpProperties_listEp
/** * Creates a new list-based event property of the parameter type eventProperty * * @param label A human-readable label of the property * @param runtimeName The field identifier of the event property at runtime. * @param eventProperty The complex type of data in the list * @return {@link org.apache.streampipes.model.schema.EventPropertyList} */ public static EventPropertyList listEp(Label label, String runtimeName, EventProperty eventProperty, String domainProperty) { return getPreparedProperty(label, new EventPropertyList(runtimeName, eventProperty, Utils.createURI (domainProperty))); }
3.68
flink_AvroWriters_forSpecificRecord
/** * Creates an {@link AvroWriterFactory} for an Avro specific type. The Avro writers will use the * schema of that specific type to build and write the records. * * @param type The class of the type to write. */ public static <T extends SpecificRecordBase> AvroWriterFactory<T> forSpecificRecord( Class<T> type) { String schemaString = SpecificData.get().getSchema(type).toString(); AvroBuilder<T> builder = (out) -> createAvroDataFileWriter(schemaString, SpecificDatumWriter::new, out); return new AvroWriterFactory<>(builder); }
3.68
morf_MathsField_getRightField
/** * @return the rightField */ public AliasedField getRightField() { return rightField; }
3.68
hmily_SQLImageMapperFactory_newInstance
/** * Create new instance of SQL image mapper. * * @param sqlTuple SQL tuple * @return SQL image mapper */ public static SQLImageMapper newInstance(final HmilySQLTuple sqlTuple) { switch (sqlTuple.getManipulationType()) { case INSERT: return new InsertSQLImageMapper(sqlTuple.getTableName(), sqlTuple.getAfterImage()); case UPDATE: return new UpdateSQLImageMapper(sqlTuple.getTableName(), sqlTuple.getBeforeImage(), sqlTuple.getAfterImage()); case DELETE: return new DeleteSQLImageMapper(sqlTuple.getTableName(), sqlTuple.getBeforeImage()); default: throw new SQLRevertException(String.format("unsupported SQL manipulate type [%s]", sqlTuple.getManipulationType())); } }
3.68
hbase_WALSplitUtil_getRecoveredHFilesDir
/** * @param regionDir This regions directory in the filesystem * @param familyName The column family name * @return The directory that holds recovered hfiles for the region's column family */ private static Path getRecoveredHFilesDir(final Path regionDir, String familyName) { return new Path(new Path(regionDir, familyName), HConstants.RECOVERED_HFILES_DIR); }
3.68
hbase_Client_executePathOnly
/** * Execute a transaction method given only the path. Will select at random one of the members of * the supplied cluster definition and iterate through the list until a transaction can be * successfully completed. The definition of success here is a complete HTTP transaction, * irrespective of result code. * @param cluster the cluster definition * @param method the transaction method * @param headers HTTP header values to send * @param path the properly urlencoded path * @return the HTTP response code */ public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers, String path) throws IOException { IOException lastException; if (cluster.nodes.size() < 1) { throw new IOException("Cluster is empty"); } int start = (int) Math.round((cluster.nodes.size() - 1) * Math.random()); int i = start; do { cluster.lastHost = cluster.nodes.get(i); try { StringBuilder sb = new StringBuilder(); if (sslEnabled) { sb.append("https://"); } else { sb.append("http://"); } sb.append(cluster.lastHost); sb.append(path); URI uri = new URI(sb.toString()); if (method instanceof HttpPut) { HttpPut put = new HttpPut(uri); put.setEntity(((HttpPut) method).getEntity()); put.setHeaders(method.getAllHeaders()); method = put; } else if (method instanceof HttpGet) { method = new HttpGet(uri); } else if (method instanceof HttpHead) { method = new HttpHead(uri); } else if (method instanceof HttpDelete) { method = new HttpDelete(uri); } else if (method instanceof HttpPost) { HttpPost post = new HttpPost(uri); post.setEntity(((HttpPost) method).getEntity()); post.setHeaders(method.getAllHeaders()); method = post; } return executeURI(method, headers, uri.toString()); } catch (IOException e) { lastException = e; } catch (URISyntaxException use) { lastException = new IOException(use); } } while (++i != start && i < cluster.nodes.size()); throw lastException; }
3.68