Columns: name (string, lengths 12-178), code_snippet (string, lengths 8-36.5k), score (float64, range 3.26-3.68). Each row below lists the name, the code snippet, and the score.
framework_UIConnector_getHead
/** * Internal helper to get the <head> tag of the page * * @since 7.3 * @return the head element */ private HeadElement getHead() { return HeadElement.as(Document.get() .getElementsByTagName(HeadElement.TAG).getItem(0)); }
3.68
hbase_SnapshotInfo_getSize
/** Returns the file size */ public long getSize() { return this.size; }
3.68
flink_StreamElement_asLatencyMarker
/** * Casts this element into a LatencyMarker. * * @return This element as a LatencyMarker. * @throws java.lang.ClassCastException Thrown, if this element is actually not a LatencyMarker. */ public final LatencyMarker asLatencyMarker() { return (LatencyMarker) this; }
3.68
querydsl_StringExpression_endsWithIgnoreCase
/** * Create a {@code this.endsWithIgnoreCase(str)} expression * * <p>Returns true if this ends with str, compares case insensitively</p> * * @param str string * @return this.endsWithIgnoreCase(str) */ public BooleanExpression endsWithIgnoreCase(String str) { return endsWithIgnoreCase(ConstantImpl.create(str)); }
3.68
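A minimal usage sketch for the endsWithIgnoreCase expression above, assuming a Querydsl JPAQueryFactory and a hypothetical generated QPerson metamodel:

    // QPerson is a hypothetical Querydsl-generated metamodel class.
    QPerson person = QPerson.person;
    List<Person> matches = queryFactory
            .selectFrom(person)
            // Case-insensitive suffix match, e.g. matches "Johnson" and "JOHNSON".
            .where(person.lastName.endsWithIgnoreCase("son"))
            .fetch();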
hbase_SnapshotReferenceUtil_getHFileNames
/** * Returns the store file names in the snapshot. * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to inspect * @throws IOException if an error occurred while scanning the directory * @return the names of hfiles in the specified snapshot */ private static Set<String> getHFileNames(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { final Set<String> names = new HashSet<>(); visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfile = storeFile.getName(); if (HFileLink.isHFileLink(hfile)) { names.add(HFileLink.getReferencedHFileName(hfile)); } else if (StoreFileInfo.isReference(hfile)) { Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path( new Path(new Path(regionInfo.getTable().getNamespaceAsString(), regionInfo.getTable().getQualifierAsString()), regionInfo.getEncodedName()), family), hfile)); names.add(hfile); names.add(refPath.getName()); if (HFileLink.isHFileLink(refPath.getName())) { names.add(HFileLink.getReferencedHFileName(refPath.getName())); } } else { names.add(hfile); } } }); return names; }
3.68
dubbo_DubboBootstrap_configCenter
// Methods related to {@link ConfigCenterConfig} public DubboBootstrap configCenter(Consumer<ConfigCenterBuilder> consumerBuilder) { return configCenter(null, consumerBuilder); }
3.68
hadoop_OBSWriteOperationHelper_initiateMultiPartUpload
/** * Start the multipart upload process. * * @param destKey object key * @return the upload result containing the ID * @throws IOException IO problem */ String initiateMultiPartUpload(final String destKey) throws IOException { LOG.debug("Initiating Multipart upload"); final InitiateMultipartUploadRequest initiateMPURequest = new InitiateMultipartUploadRequest(bucket, destKey); initiateMPURequest.setAcl(owner.getCannedACL()); initiateMPURequest.setMetadata(newObjectMetadata(-1)); if (owner.getSse().isSseCEnable()) { initiateMPURequest.setSseCHeader(owner.getSse().getSseCHeader()); } else if (owner.getSse().isSseKmsEnable()) { initiateMPURequest.setSseKmsHeader( owner.getSse().getSseKmsHeader()); } try { return obs.initiateMultipartUpload(initiateMPURequest) .getUploadId(); } catch (ObsException ace) { throw OBSCommonUtils.translateException("Initiate MultiPartUpload", destKey, ace); } }
3.68
hmily_CuratorZookeeperClient_addListener
/** * Add listener. * * @param context the context * @param passiveHandler the passive handler * @param config the config * @throws Exception the exception */ public void addListener(final Supplier<ConfigLoader.Context> context, final ConfigLoader.PassiveHandler<ZkPassiveConfig> passiveHandler, final ZookeeperConfig config) throws Exception { if (!config.isPassive()) { return; } if (client == null) { LOGGER.warn("zookeeper client is null..."); } // CuratorCache cannot receive events from lower-version ZooKeeper servers, so we use NodeCache instead: // it is marked @Deprecated in newer Curator versions but receives messages correctly. //@see CuratorCache NodeCache cache = new NodeCache(client, config.getPath()); cache.getListenable().addListener(() -> { byte[] data = cache.getCurrentData().getData(); String string = new String(data, StandardCharsets.UTF_8); ZkPassiveConfig zkPassiveConfig = new ZkPassiveConfig(); zkPassiveConfig.setPath(config.getPath()); zkPassiveConfig.setFileExtension(config.getFileExtension()); zkPassiveConfig.setValue(string); passiveHandler.passive(context, zkPassiveConfig); }); cache.start(); LOGGER.info("passive zookeeper remote started...."); }
3.68
rocketmq-connect_FieldsMetadata_extract
/** * Extract metadata info. * * @param tableName the target table name * @param pkMode the primary key mode * @param configuredPkFields the configured primary key field names * @param fieldsWhitelist the whitelist of value fields to include (empty means all) * @param keySchema the record key schema * @param schema the record value schema * @param headers the record headers * @return the extracted fields metadata */ public static FieldsMetadata extract( final String tableName, final JdbcSinkConfig.PrimaryKeyMode pkMode, final List<String> configuredPkFields, final Set<String> fieldsWhitelist, final Schema keySchema, final Schema schema, final KeyValue headers ) { if (schema != null && schema.getFieldType() != FieldType.STRUCT) { throw new ConnectException("Value schema must be of type Struct"); } final Map<String, SinkRecordField> allFields = new HashMap<>(); final Set<String> keyFieldNames = new LinkedHashSet<>(); switch (pkMode) { case NONE: break; case RECORD_KEY: extractRecordKeyPk(tableName, configuredPkFields, keySchema, allFields, keyFieldNames); break; case RECORD_VALUE: extractRecordValuePk(tableName, configuredPkFields, schema, headers, allFields, keyFieldNames); break; default: throw new ConnectException("Unknown primary key mode: " + pkMode); } final Set<String> nonKeyFieldNames = new LinkedHashSet<>(); if (schema != null) { for (Field field : schema.getFields()) { if (keyFieldNames.contains(field.getName())) { continue; } if (!fieldsWhitelist.isEmpty() && !fieldsWhitelist.contains(field.getName())) { continue; } nonKeyFieldNames.add(field.getName()); final Schema fieldSchema = field.getSchema(); allFields.put(field.getName(), new SinkRecordField(fieldSchema, field.getName(), false)); } } if (allFields.isEmpty()) { throw new ConnectException( "No fields found using key and value schemas for table: " + tableName ); } final Map<String, SinkRecordField> allFieldsOrdered = new LinkedHashMap<>(); if (schema != null) { for (Field field : schema.getFields()) { String fieldName = field.getName(); if (allFields.containsKey(fieldName)) { allFieldsOrdered.put(fieldName, allFields.get(fieldName)); } } } if (allFieldsOrdered.size() < allFields.size()) { ArrayList<String> fieldKeys = new ArrayList<>(allFields.keySet()); Collections.sort(fieldKeys); for (String fieldName : fieldKeys) { if (!allFieldsOrdered.containsKey(fieldName)) { allFieldsOrdered.put(fieldName, allFields.get(fieldName)); } } } return new FieldsMetadata(keyFieldNames, nonKeyFieldNames, allFieldsOrdered); }
3.68
morf_SqlDialect_getSqlforBlobLength
/** * Converts the function to get the LENGTH of BLOB data or a field into SQL. * Use LENGTH instead of OCTET_LENGTH as they are synonymous in MySQL and PostgreSQL. In H2, LENGTH returns the correct * number of bytes, whereas OCTET_LENGTH returns 2 times the byte length. * @param function the function to convert. * @return a string representation of the SQL. * @see org.alfasoftware.morf.sql.element.Function#blobLength(AliasedField) */ protected String getSqlforBlobLength(Function function) { return String.format("LENGTH(%s)", getSqlFrom(function.getArguments().get(0))); }
3.68
flink_DefaultCheckpointPlanCalculator_checkAllTasksInitiated
/** * Checks if all tasks are attached with the current Execution already. This method should be * called from JobMaster main thread executor. * * @throws CheckpointException if some tasks do not have attached Execution. */ private void checkAllTasksInitiated() throws CheckpointException { for (ExecutionVertex task : allTasks) { if (task.getCurrentExecutionAttempt() == null) { throw new CheckpointException( String.format( "task %s of job %s is not being executed at the moment. Aborting checkpoint.", task.getTaskNameWithSubtaskIndex(), jobId), CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING); } } }
3.68
hbase_NamespacesInstanceResource_deleteNoBody
/** * Build a response for a DELETE request that drops a namespace. * @param message value not used. * @param headers value not used. * @return response code. */ @DELETE public Response deleteNoBody(final byte[] message, final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) .entity("Forbidden" + CRLF).build(); } try { Admin admin = servlet.getAdmin(); if (!doesNamespaceExist(admin, namespace)) { return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) .entity("Namespace '" + namespace + "' does not exist. Cannot " + "drop namespace.") .build(); } admin.deleteNamespace(namespace); servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); } catch (IOException e) { servlet.getMetrics().incrementFailedDeleteRequests(1); return processException(e); } }
3.68
hbase_TimeRange_until
/** * Represents the time interval [0, maxStamp) * @param maxStamp the maximum timestamp value, exclusive */ public static TimeRange until(long maxStamp) { check(INITIAL_MIN_TIMESTAMP, maxStamp); return new TimeRange(INITIAL_MIN_TIMESTAMP, maxStamp); }
3.68
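A quick sketch of the half-open semantics; withinTimeRange is part of the same TimeRange class:

    // [0, 100): contains timestamps 0..99, excludes 100.
    TimeRange range = TimeRange.until(100L);
    boolean in = range.withinTimeRange(99L);    // true
    boolean out = range.withinTimeRange(100L);  // false, maxStamp is exclusive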
hadoop_TimelineEntity_addRelatedEntities
/** * Add a map of related entities to the existing related entity map * * @param relatedEntities * a map of related entities */ public void addRelatedEntities(Map<String, Set<String>> relatedEntities) { for (Entry<String, Set<String>> relatedEntity : relatedEntities.entrySet()) { Set<String> thisRelatedEntity = this.relatedEntities.get(relatedEntity.getKey()); if (thisRelatedEntity == null) { this.relatedEntities.put( relatedEntity.getKey(), relatedEntity.getValue()); } else { thisRelatedEntity.addAll(relatedEntity.getValue()); } } }
3.68
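A small sketch of the merge behavior above (entity types and IDs are illustrative): values for an existing key are unioned into the stored set, while new keys are inserted as-is.

    TimelineEntity entity = new TimelineEntity();
    entity.addRelatedEntity("CONTAINER", "container_01");

    Map<String, Set<String>> more = new HashMap<>();
    more.put("CONTAINER", new HashSet<>(Collections.singleton("container_02")));
    more.put("ATTEMPT", new HashSet<>(Collections.singleton("attempt_01")));

    // "CONTAINER" now maps to {container_01, container_02}; "ATTEMPT" is added as-is.
    entity.addRelatedEntities(more);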
flink_SlotPool_requestNewAllocatedSlot
/** * Request the allocation of a new slot from the resource manager. This method will not return a * slot from the already available slots from the pool, but instead will add a new slot to that * pool that is immediately allocated and returned. * * @param slotRequestId identifying the requested slot * @param resourceProfile resource profile that specifies the resource requirements for the * requested slot * @param timeout timeout for the allocation procedure * @return a newly allocated slot that was previously not available. */ default CompletableFuture<PhysicalSlot> requestNewAllocatedSlot( SlotRequestId slotRequestId, ResourceProfile resourceProfile, @Nullable Time timeout) { return requestNewAllocatedSlot( slotRequestId, resourceProfile, Collections.emptyList(), timeout); }
3.68
querydsl_NumberExpression_add
/** * Create a {@code this + right} expression * * <p>Get the sum of this and right</p> * * @param right rhs of expression * @return this + right */ public <N extends Number & Comparable<N>> NumberExpression<T> add(N right) { return Expressions.numberOperation(getType(), Ops.ADD, mixin, ConstantImpl.create(right)); }
3.68
flink_JobGraph_findVertexByID
/** * Searches for a vertex with a matching ID and returns it. * * @param id the ID of the vertex to search for * @return the vertex with the matching ID or <code>null</code> if no vertex with such ID could * be found */ public JobVertex findVertexByID(JobVertexID id) { return this.taskVertices.get(id); }
3.68
dubbo_ReferenceConfig_checkMeshConfig
/** * Check if the mesh config is correct. * * @param referenceParameters referenceParameters * @return whether the mesh config is correct */ private boolean checkMeshConfig(Map<String, String> referenceParameters) { if (!"true".equals(referenceParameters.getOrDefault(MESH_ENABLE, "false"))) { // In mesh mode, unloadClusterRelated can only be false. referenceParameters.put(UNLOAD_CLUSTER_RELATED, "false"); return false; } getScopeModel() .getConfigManager() .getProtocol(TRIPLE) .orElseThrow(() -> new IllegalStateException("In mesh mode, a triple protocol must be specified")); String providedBy = referenceParameters.get(PROVIDED_BY); if (StringUtils.isEmpty(providedBy)) { throw new IllegalStateException("In mesh mode, the providedBy of ReferenceConfig must be set"); } return true; }
3.68
flink_RestClient_forUrl
/** * Creates a new RestClient for the provided root URL. If the protocol of the URL is "https", * then SSL is automatically enabled for the REST client. */ public static RestClient forUrl(Configuration configuration, Executor executor, URL rootUrl) throws ConfigurationException { Preconditions.checkNotNull(configuration); Preconditions.checkNotNull(rootUrl); if ("https".equals(rootUrl.getProtocol())) { configuration = configuration.clone(); configuration.setBoolean(SSL_REST_ENABLED, true); } return new RestClient(configuration, executor, rootUrl.getHost(), rootUrl.getPort()); }
3.68
dubbo_ScopeModelAware_setApplicationModel
/** * Override this method if you just need the application model. * @param applicationModel the application model */ default void setApplicationModel(ApplicationModel applicationModel) {}
3.68
AreaShop_AreaShop_isReady
/** * Indicates if the plugin is ready to be used. * @return true if the plugin is ready, false otherwise */ public boolean isReady() { return ready; }
3.68
pulsar_DebeziumSource_topicNamespace
// namespace for output topics, default value is "tenant/namespace" public static String topicNamespace(SourceContext sourceContext) { String tenant = sourceContext.getTenant(); String namespace = sourceContext.getNamespace(); return (StringUtils.isEmpty(tenant) ? TopicName.PUBLIC_TENANT : tenant) + "/" + (StringUtils.isEmpty(namespace) ? TopicName.DEFAULT_NAMESPACE : namespace); }
3.68
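The fallback logic above is easy to check in isolation; a tiny re-implementation with plain strings (helper name hypothetical, default values taken from Pulsar's public tenant and default namespace):

    static String namespaceOf(String tenant, String namespace) {
        // Mirrors the fallback above: empty tenant -> "public", empty namespace -> "default".
        String t = (tenant == null || tenant.isEmpty()) ? "public" : tenant;
        String n = (namespace == null || namespace.isEmpty()) ? "default" : namespace;
        return t + "/" + n;
    }

    // namespaceOf(null, null)    -> "public/default"
    // namespaceOf("acme", "cdc") -> "acme/cdc"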
hbase_ReplicationPeerConfigUtil_parseTableCFsFromConfig
/** * Convert tableCFs string into Map. */ public static Map<TableName, List<String>> parseTableCFsFromConfig(String tableCFsConfig) { ReplicationProtos.TableCF[] tableCFs = convert(tableCFsConfig); return convert2Map(tableCFs); }
3.68
flink_InternalTimerServiceImpl_startTimerService
/** * Starts the local {@link InternalTimerServiceImpl} by: * * <ol> * <li>Setting the {@code keySerializer} and {@code namespaceSerializer} for the timers it * will contain. * <li>Setting the {@code triggerTarget} which contains the action to be performed when a * timer fires. * <li>Re-registering timers that were retrieved after recovering from a node failure, if any. * </ol> * * <p>This method can be called multiple times, as long as it is called with the same * serializers. */ public void startTimerService( TypeSerializer<K> keySerializer, TypeSerializer<N> namespaceSerializer, Triggerable<K, N> triggerTarget) { if (!isInitialized) { if (keySerializer == null || namespaceSerializer == null) { throw new IllegalArgumentException("The TimersService serializers cannot be null."); } if (this.keySerializer != null || this.namespaceSerializer != null || this.triggerTarget != null) { throw new IllegalStateException("The TimerService has already been initialized."); } // the following is the case where we restore if (restoredTimersSnapshot != null) { TypeSerializerSchemaCompatibility<K> keySerializerCompatibility = restoredTimersSnapshot .getKeySerializerSnapshot() .resolveSchemaCompatibility(keySerializer); if (keySerializerCompatibility.isIncompatible() || keySerializerCompatibility.isCompatibleAfterMigration()) { throw new IllegalStateException( "Tried to initialize restored TimerService with new key serializer that requires migration or is incompatible."); } TypeSerializerSchemaCompatibility<N> namespaceSerializerCompatibility = restoredTimersSnapshot .getNamespaceSerializerSnapshot() .resolveSchemaCompatibility(namespaceSerializer); restoredTimersSnapshot = null; if (namespaceSerializerCompatibility.isIncompatible() || namespaceSerializerCompatibility.isCompatibleAfterMigration()) { throw new IllegalStateException( "Tried to initialize restored TimerService with new namespace serializer that requires migration or is incompatible."); } this.keySerializer = keySerializerCompatibility.isCompatibleAsIs() ? keySerializer : keySerializerCompatibility.getReconfiguredSerializer(); this.namespaceSerializer = namespaceSerializerCompatibility.isCompatibleAsIs() ? namespaceSerializer : namespaceSerializerCompatibility.getReconfiguredSerializer(); } else { this.keySerializer = keySerializer; this.namespaceSerializer = namespaceSerializer; } this.keyDeserializer = null; this.namespaceDeserializer = null; this.triggerTarget = Preconditions.checkNotNull(triggerTarget); // re-register the restored timers (if any) final InternalTimer<K, N> headTimer = processingTimeTimersQueue.peek(); if (headTimer != null) { nextTimer = processingTimeService.registerTimer( headTimer.getTimestamp(), this::onProcessingTime); } this.isInitialized = true; } else { if (!(this.keySerializer.equals(keySerializer) && this.namespaceSerializer.equals(namespaceSerializer))) { throw new IllegalArgumentException( "Already initialized Timer Service " + "tried to be initialized with different key and namespace serializers."); } } }
3.68
hadoop_Check_notNull
/** * Verifies a variable is not NULL. * * @param obj the variable to check. * @param name the name to use in the exception message. * * @return the variable. * * @throws IllegalArgumentException if the variable is NULL. */ public static <T> T notNull(T obj, String name) { if (obj == null) { throw new IllegalArgumentException(name + " cannot be null"); } return obj; }
3.68
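Typical guard-clause usage for Check.notNull, e.g. at the top of a constructor (class and field names hypothetical):

    public class AuditLogger {
        private final FileSystem fs;
        private final String dir;

        public AuditLogger(FileSystem fs, String dir) {
            // Fails fast with "fs cannot be null" / "dir cannot be null".
            this.fs = Check.notNull(fs, "fs");
            this.dir = Check.notNull(dir, "dir");
        }
    }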
hbase_Server_getAsyncConnection
/** * Returns a reference to the server's async connection. * <p/> * Important note: this method returns a reference to a Connection which is managed by the Server * itself, so callers must NOT attempt to close the connection obtained. */ default AsyncConnection getAsyncConnection() { return getAsyncClusterConnection(); }
3.68
hbase_RecoverLeaseFSUtils_recoverLease
/** * Try to recover the lease. * @return true if dfs#recoverLease returned true. */ private static boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it throw new FileNotFoundException("The given WAL wasn't found at " + p); } else if (e instanceof FileNotFoundException) { throw (FileNotFoundException) e; } LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e); } return recovered; }
3.68
framework_VTabsheet_isClipped
/** * Checks whether the given tab is clipped out of view (hidden behind the * scroller element or overflowing further). Does not check whether hiding * the scroller element would bring this tab fully into view. * * @param tab the tab to check * @return {@code true} if the given tab is clipped, {@code false} otherwise */ private boolean isClipped(Tab tab) { return tab.getAbsoluteLeft() + tab.getOffsetWidth() > getAbsoluteLeft() + getOffsetWidth() - scroller.getOffsetWidth(); }
3.68
framework_AbstractRemoteDataSource_canFetchData
/** * Checks if it is possible to currently fetch data from the remote data * source. * * @return <code>true</code> if it is ok to try to fetch data, * <code>false</code> if it is known that fetching data will fail * and should not be tried right now. * @since 7.7.2 */ protected boolean canFetchData() { return true; }
3.68
rocketmq-connect_DelegatingClassLoader_pluginClassLoader
/** * Retrieve the PluginClassLoader associated with a plugin class. * * @param name the plugin class name * @return the PluginClassLoader for the plugin, or null if the plugin should not be loaded in isolation */ public PluginClassLoader pluginClassLoader(String name) { if (StringUtils.isEmpty(name) || StringUtils.isBlank(name)) { return null; } if (!PluginUtils.shouldLoadInIsolation(name)) { return null; } SortedMap<PluginWrapper<?>, ClassLoader> inner = pluginLoaders.get(name); if (inner == null) { return null; } ClassLoader pluginLoader = inner.get(inner.lastKey()); return pluginLoader instanceof PluginClassLoader ? (PluginClassLoader) pluginLoader : null; }
3.68
morf_InsertStatementBuilder_useParallelDml
/** * Request that this statement is executed with a parallel execution plan for data manipulation language (DML). This request will have no effect unless the database implementation supports it and the feature is enabled. * * <p>For statements that will affect a high percentage of rows in the table, a parallel execution plan may reduce the execution time, although the exact effect depends on * the underlying database, the nature of the data and the nature of the query.</p> * * <p>Note that the use of parallel DML comes with restrictions, in particular, a table may not be accessed in the same transaction following a parallel DML execution. Please consult the Oracle manual section <em>Restrictions on Parallel DML</em> to check whether this hint is suitable.</p> * * @param degreeOfParallelism - the degree of parallelism to be used * @return this, for method chaining. */ public InsertStatementBuilder useParallelDml(int degreeOfParallelism) { getHints().add(new UseParallelDml(degreeOfParallelism)); return this; }
3.68
flink_InputTypeStrategies_explicitSequence
/** * Strategy for a named function signature of explicitly defined types like {@code f(s STRING, i * INT)}. Implicit casts will be inserted if possible. * * <p>This is equivalent to using {@link #sequence(String[], ArgumentTypeStrategy[])} and {@link * #explicit(DataType)}. */ public static InputTypeStrategy explicitSequence( String[] argumentNames, DataType[] expectedDataTypes) { final List<ArgumentTypeStrategy> strategies = Arrays.stream(expectedDataTypes) .map(InputTypeStrategies::explicit) .collect(Collectors.toList()); return new SequenceInputTypeStrategy(strategies, Arrays.asList(argumentNames)); }
3.68
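A sketch of constructing the strategy above, assuming Flink's table API data types; the resulting strategy accepts exactly f(s STRING, i INT) and inserts implicit casts where possible:

    InputTypeStrategy strategy =
            InputTypeStrategies.explicitSequence(
                    new String[] {"s", "i"},
                    new DataType[] {DataTypes.STRING(), DataTypes.INT()});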
graphhopper_VectorTile_hasDoubleValue
/** * <code>optional double double_value = 3;</code> */ public boolean hasDoubleValue() { return ((bitField0_ & 0x00000004) == 0x00000004); }
3.68
hibernate-validator_ConstrainedExecutable_isConstrained
/** * Whether the represented executable is constrained or not. This is the case if * it has at least one constrained parameter, at least one parameter marked * for cascaded validation, at least one cross-parameter constraint, at * least one return value constraint or if the return value is marked for * cascaded validation. * * @return {@code True} if this executable is constrained by any means, * {@code false} otherwise. */ @Override public boolean isConstrained() { return super.isConstrained() || hasParameterConstraints; }
3.68
hadoop_IOStatisticsStoreImpl_setAtomicLong
/** * Set an atomic long to a value. * @param aLong atomic long; may be null * @param value value to set to */ private void setAtomicLong(final AtomicLong aLong, final long value) { if (aLong != null) { aLong.set(value); } }
3.68
flink_FlinkContainersSettings_taskManagerHostnamePrefix
/** * Sets the {@code taskManagerHostnamePrefix} and returns a reference to this Builder * enabling method chaining. * * @param taskManagerHostnamePrefix The {@code taskManagerHostnamePrefix} to set. * @return A reference to this Builder. */ public Builder taskManagerHostnamePrefix(String taskManagerHostnamePrefix) { this.taskManagerHostnamePrefix = taskManagerHostnamePrefix; return this; }
3.68
streampipes_SimpleBlockFusionProcessor_getInstance
/** * Returns the singleton instance of SimpleBlockFusionProcessor. */ public static SimpleBlockFusionProcessor getInstance() { return INSTANCE; }
3.68
querydsl_DateExpression_year
/** * Create a year expression * * @return year */ public NumberExpression<Integer> year() { if (year == null) { year = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.YEAR, mixin); } return year; }
3.68
hudi_HoodieAdbJdbcClient_updateTableDefinition
/** * TODO align with {@link org.apache.hudi.sync.common.HoodieMetaSyncOperations#updateTableSchema} */ public void updateTableDefinition(String tableName, SchemaDifference schemaDiff) { LOG.info("Adding columns for table:{}", tableName); schemaDiff.getAddColumnTypes().forEach((columnName, columnType) -> executeAdbSql(constructAddColumnSql(tableName, columnName, columnType)) ); LOG.info("Updating columns' definition for table:{}", tableName); schemaDiff.getUpdateColumnTypes().forEach((columnName, columnType) -> executeAdbSql(constructChangeColumnSql(tableName, columnName, columnType)) ); }
3.68
dubbo_DubboProtocol_createInvocation
/** * FIXME channel.getUrl() always binds to a fixed service, and this service is random. * We can choose to use a common service to carry the onConnect event if there's no easy way to get the specific * service this connection is binding to. * @param channel the channel * @param url the url bound to the channel * @param methodKey the URL parameter key holding the method name * @return the invocation, or null if no method name is configured */ private Invocation createInvocation(Channel channel, URL url, String methodKey) { String method = url.getParameter(methodKey); if (method == null || method.length() == 0) { return null; } RpcInvocation invocation = new RpcInvocation( url.getServiceModel(), method, url.getParameter(INTERFACE_KEY), "", new Class<?>[0], new Object[0]); invocation.setAttachment(PATH_KEY, url.getPath()); invocation.setAttachment(GROUP_KEY, url.getGroup()); invocation.setAttachment(INTERFACE_KEY, url.getParameter(INTERFACE_KEY)); invocation.setAttachment(VERSION_KEY, url.getVersion()); if (url.getParameter(STUB_EVENT_KEY, false)) { invocation.setAttachment(STUB_EVENT_KEY, Boolean.TRUE.toString()); } return invocation; }
3.68
flink_ExecutionConfig_getLatencyTrackingInterval
/** * Returns the latency tracking interval. * * @return The latency tracking interval in milliseconds */ @PublicEvolving public long getLatencyTrackingInterval() { return configuration.get(MetricOptions.LATENCY_INTERVAL); }
3.68
flink_MaxwellJsonFormatFactory_validateDecodingFormatOptions
/** Validator for maxwell decoding format. */ private static void validateDecodingFormatOptions(ReadableConfig tableOptions) { JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions); }
3.68
flink_JavaFieldPredicates_isAssignableTo
/** * Match the {@link Class} of the {@link JavaField}'s assignability. * * @param clazz the Class type to check for assignability * @return a {@link DescribedPredicate} that returns {@code true}, if the respective {@link * JavaField} is assignable to the supplied {@code clazz}. */ public static DescribedPredicate<JavaField> isAssignableTo(Class<?> clazz) { return DescribedPredicate.describe( "is assignable to " + clazz.getSimpleName(), field -> field.getRawType().isAssignableTo(clazz)); }
3.68
graphhopper_RamerDouglasPeucker_setMaxDistance
/** * Maximum distance of discrepancy (from the normal way) in meters. */ public RamerDouglasPeucker setMaxDistance(double dist) { this.normedMaxDist = calc.calcNormalizedDist(dist); this.maxDistance = dist; return this; }
3.68
flink_HadoopFileStatus_fromHadoopStatus
/** * Creates a new {@code HadoopFileStatus} from Hadoop's {@link org.apache.hadoop.fs.FileStatus}. * If Hadoop's file status is <i>located</i>, i.e., it contains block information, then this * method returns an implementation of Flink's {@link * org.apache.flink.core.fs.LocatedFileStatus}. */ public static HadoopFileStatus fromHadoopStatus( final org.apache.hadoop.fs.FileStatus fileStatus) { return fileStatus instanceof org.apache.hadoop.fs.LocatedFileStatus ? new LocatedHadoopFileStatus((org.apache.hadoop.fs.LocatedFileStatus) fileStatus) : new HadoopFileStatus(fileStatus); }
3.68
hbase_CellComparatorImpl_compareQualifiers
/** * Compare the qualifiers part of the left and right cells. * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise */ @Override public final int compareQualifiers(Cell left, Cell right) { if ((left instanceof ByteBufferKeyValue) && (right instanceof ByteBufferKeyValue)) { return compareQualifiers((ByteBufferKeyValue) left, (ByteBufferKeyValue) right); } else if ((left instanceof KeyValue) && (right instanceof KeyValue)) { return compareQualifiers((KeyValue) left, (KeyValue) right); } else if ((left instanceof KeyValue) && (right instanceof ByteBufferKeyValue)) { return compareQualifiers((KeyValue) left, (ByteBufferKeyValue) right); } else if ((left instanceof ByteBufferKeyValue) && (right instanceof KeyValue)) { return compareQualifiers((ByteBufferKeyValue) left, (KeyValue) right); } else { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); } if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); } if (right instanceof ByteBufferExtendedCell) { // Notice how we flip the order of the compare here. We used to negate the return value but // see what FindBugs says // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // It suggests flipping the order to get the same effect and be 'safer'. return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); } return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); } }
3.68
flink_CoGroupOperator_equalTo
/** * Continues a CoGroup transformation and defines a {@link KeySelector} function for the * second co-grouped {@link DataSet}. * * <p>The KeySelector function is called for each element of the second DataSet and * extracts a single key value on which the DataSet is grouped. * * @param keyExtractor The KeySelector function which extracts the key values from the * second DataSet on which it is grouped. * @return An incomplete CoGroup transformation. Call {@link * org.apache.flink.api.java.operators.CoGroupOperator.CoGroupOperatorSets.CoGroupOperatorSetsPredicate.CoGroupOperatorWithoutFunction#with(org.apache.flink.api.common.functions.CoGroupFunction)} * to finalize the CoGroup transformation. */ public <K> CoGroupOperatorWithoutFunction equalTo(KeySelector<I2, K> keyExtractor) { TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, input2.getType()); return createCoGroupOperator( new SelectorFunctionKeys<>( input1.clean(keyExtractor), input2.getType(), keyType)); }
3.68
querydsl_QueryBase_set
/** * Set the given parameter to the given value * * @param <P> type of the parameter value * @param param param * @param value binding * @return the current object */ public <P> Q set(ParamExpression<P> param, P value) { return queryMixin.set(param, value); }
3.68
framework_DesignContext_getComponentById
/** * Returns a component having the specified global id. If no component is * found, returns null. * * @param globalId * The global id of the component * @return a component whose global id equals globalId */ public Component getComponentById(String globalId) { return idToComponent.get(globalId); }
3.68
hadoop_JobTokenSecretManager_createSecretKey
/** * Convert the byte[] to a secret key * @param key the byte[] to create the secret key from * @return the secret key */ public static SecretKey createSecretKey(byte[] key) { return SecretManager.createSecretKey(key); }
3.68
flink_StreamExecutionEnvironment_getRestartStrategy
/** * Returns the specified restart strategy configuration. * * @return The restart strategy configuration to be used */ @PublicEvolving public RestartStrategies.RestartStrategyConfiguration getRestartStrategy() { return config.getRestartStrategy(); }
3.68
hadoop_AbstractS3ACommitter_maybeSaveSummary
/** * Save a summary to the report dir if the config option * is set. * The report will be updated with the current active stage, * and if {@code thrown} is non-null, it will be added to the * diagnostics (and the job tagged as a failure). * Static for testability. * @param activeStage active stage * @param context commit context. * @param report summary file. * @param thrown any exception indicating failure. * @param quiet should exceptions be swallowed. * @param overwrite should the existing file be overwritten * @return the path of a file, if successfully saved * @throws IOException if a failure occurred and quiet==false */ private static Path maybeSaveSummary( String activeStage, CommitContext context, SuccessData report, Throwable thrown, boolean quiet, boolean overwrite) throws IOException { Configuration conf = context.getConf(); String reportDir = conf.getTrimmed(OPT_SUMMARY_REPORT_DIR, ""); if (reportDir.isEmpty()) { LOG.debug("No summary directory set in " + OPT_SUMMARY_REPORT_DIR); return null; } LOG.debug("Summary directory set to {}", reportDir); Path reportDirPath = new Path(reportDir); Path path = new Path(reportDirPath, createJobSummaryFilename(context.getJobId())); if (thrown != null) { report.recordJobFailure(thrown); } report.putDiagnostic(STAGE, activeStage); // the store operations object here is explicitly created for the FS where // the reports go, which may not be the target FS of the job. final FileSystem fs = path.getFileSystem(conf); try (ManifestStoreOperations operations = new ManifestStoreOperationsThroughFileSystem( fs)) { if (!overwrite) { // check for file existence so there is no need to worry about // precisely what exception is raised when overwrite=false and dest file // exists try { FileStatus st = operations.getFileStatus(path); // get here and the file exists LOG.debug("Report already exists: {}", st); return null; } catch (FileNotFoundException ignored) { } } report.save(fs, path, SuccessData.serializer()); LOG.info("Job summary saved to {}", path); return path; } catch (IOException e) { LOG.debug("Failed to save summary to {}", path, e); if (quiet) { return null; } else { throw e; } } }
3.68
pulsar_ObjectMapperFactory_clearCaches
/** * Clears the caches tied to the ObjectMapper instances and replaces the singleton ObjectMapper instance. * * This can be used in tests to ensure that classloaders and class references don't leak across tests. */ public static void clearCaches() { clearTypeFactoryCache(getMapper().getObjectMapper()); clearTypeFactoryCache(getYamlMapper().getObjectMapper()); clearTypeFactoryCache(getMapperWithIncludeAlways().getObjectMapper()); replaceSingletonInstances(); }
3.68
dubbo_URLParam_parse
/** * Parse URLParam. * Initializing URLParam via the constructor is not allowed. * * @param params params map added into URLParam * @param rawParam original rawParam string, directly added to the rawParam field; * it will not affect the real key-value pairs stored in URLParam. * Please make sure it corresponds with params, or it will * cause unexpected results when calling {@link URLParam#getRawParam()} * and {@link URLParam#toString()}. If you are not sure, you can call * {@link URLParam#parse(String)} to init. * @return a new URLParam */ public static URLParam parse(Map<String, String> params, String rawParam) { if (CollectionUtils.isNotEmptyMap(params)) { int capacity = (int) (params.size() / .75f) + 1; BitSet keyBit = new BitSet(capacity); Map<Integer, Integer> valueMap = new HashMap<>(capacity); Map<String, String> extraParam = new HashMap<>(capacity); Map<String, Map<String, String>> methodParameters = new HashMap<>(capacity); for (Map.Entry<String, String> entry : params.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); addParameter(keyBit, valueMap, extraParam, methodParameters, key, value, false); // compatible with lower versions registering "default." keys if (key.startsWith(DEFAULT_KEY_PREFIX)) { addParameter( keyBit, valueMap, extraParam, methodParameters, key.substring(DEFAULT_KEY_PREFIX.length()), value, true); } } return new URLParam(keyBit, valueMap, extraParam, methodParameters, rawParam); } else { return EMPTY_PARAM; } }
3.68
flink_FlinkRelMetadataQuery_flinkDistribution
/** * Returns the {@link FlinkRelDistribution} statistic. * * @param rel the relational expression * @return description of how the rows in the relational expression are physically distributed */ public FlinkRelDistribution flinkDistribution(RelNode rel) { for (; ; ) { try { return distributionHandler.flinkDistribution(rel, this); } catch (JaninoRelMetadataProvider.NoHandler e) { distributionHandler = revise(e.relClass, FlinkMetadata.FlinkDistribution.DEF); } } }
3.68
hadoop_ListResultEntrySchema_withLastModified
/** * Set the lastModified value. * * @param lastModified the lastModified value to set * @return the ListEntrySchema object itself. */ public ListResultEntrySchema withLastModified(String lastModified) { this.lastModified = lastModified; return this; }
3.68
flink_DefaultCheckpointPlanCalculator_calculateWithAllTasksRunning
/** * Computes the checkpoint plan when all tasks are running. It simply marks all source * tasks as needing to trigger and all tasks as needing to wait and commit. * * @return The plan of this checkpoint. */ private CheckpointPlan calculateWithAllTasksRunning() { List<Execution> executionsToTrigger = sourceTasks.stream() .map(ExecutionVertex::getCurrentExecutionAttempt) .collect(Collectors.toList()); List<Execution> tasksToWaitFor = createTaskToWaitFor(allTasks); return new DefaultCheckpointPlan( Collections.unmodifiableList(executionsToTrigger), Collections.unmodifiableList(tasksToWaitFor), Collections.unmodifiableList(allTasks), Collections.emptyList(), Collections.emptyList(), allowCheckpointsAfterTasksFinished); }
3.68
flink_EdgeManagerBuildUtil_connectVertexToResult
/** * Calculates the connections between {@link ExecutionJobVertex} and {@link IntermediateResult} * based on the {@link DistributionPattern}. * * @param vertex the downstream consumer {@link ExecutionJobVertex} * @param intermediateResult the upstream consumed {@link IntermediateResult} */ static void connectVertexToResult( ExecutionJobVertex vertex, IntermediateResult intermediateResult) { final DistributionPattern distributionPattern = intermediateResult.getConsumingDistributionPattern(); final JobVertexInputInfo jobVertexInputInfo = vertex.getGraph() .getJobVertexInputInfo(vertex.getJobVertexId(), intermediateResult.getId()); switch (distributionPattern) { case POINTWISE: connectPointwise(vertex, intermediateResult, jobVertexInputInfo); break; case ALL_TO_ALL: connectAllToAll(vertex, intermediateResult, jobVertexInputInfo); break; default: throw new IllegalArgumentException("Unrecognized distribution pattern."); } }
3.68
hbase_MobStoreScanner_next
/** * First reads the cells from HBase. If a cell is a reference cell (it has the reference tag), * the scanner needs to seek this cell in the mob file, and uses the cell found in * the mob file as the result. */ @Override public boolean next(List<Cell> outResult, ScannerContext ctx) throws IOException { boolean result = super.next(outResult, ctx); if (!rawMobScan) { // retrieve the mob data if (outResult.isEmpty()) { return result; } long mobKVCount = 0; long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { Cell cell = outResult.get(i); if (MobUtils.isMobReferenceCell(cell)) { MobCell mobCell = mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); mobKVCount++; mobKVSize += mobCell.getCell().getValueLength(); outResult.set(i, mobCell.getCell()); // Keep the MobCell here until we have shipped the RPC or closed the scanner. referencedMobCells.add(mobCell); } } mobStore.updateMobScanCellsCount(mobKVCount); mobStore.updateMobScanCellsSize(mobKVSize); } return result; }
3.68
rocketmq-connect_Deserializer_close
/** * Close this deserializer. * <p> * This method must be idempotent as it may be called multiple times. */ @Override default void close() { // intentionally left blank }
3.68
hbase_HRegionServer_getRegion
/** * Protected Utility method for safely obtaining an HRegion handle. * @param regionName Name of online {@link HRegion} to return * @return {@link HRegion} for <code>regionName</code> */ protected HRegion getRegion(final byte[] regionName) throws NotServingRegionException { String encodedRegionName = RegionInfo.encodeRegionName(regionName); return getRegionByEncodedName(regionName, encodedRegionName); }
3.68
morf_AbstractSqlDialectTest_testSelectOrderByScript
/** * Tests a select with an "order by" clause. */ @Test public void testSelectOrderByScript() { SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD)) .from(new TableReference(ALTERNATE_TABLE)) .orderBy(new FieldReference(STRING_FIELD)); String expectedSql = "SELECT stringField FROM " + tableName(ALTERNATE_TABLE) + " ORDER BY stringField"; if (!nullOrder().equals(StringUtils.EMPTY)) { expectedSql = expectedSql + " " + nullOrder(); } assertEquals("Select with order by", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
hbase_MasterRegistry_parseMasterAddrs
/** * Parses the list of master addresses from the provided configuration. Supported format is comma * separated host[:port] values. If no port number is specified, the default master port is assumed. * @param conf Configuration to parse from. */ public static Set<ServerName> parseMasterAddrs(Configuration conf) throws UnknownHostException { final int defaultPort = getDefaultMasterPort(conf); final Set<ServerName> masterAddrs = new HashSet<>(); final String configuredMasters = getMasterAddr(conf); for (String masterAddr : Splitter.onPattern(MASTER_ADDRS_CONF_SEPARATOR) .split(configuredMasters)) { final HostAndPort masterHostPort = HostAndPort.fromString(masterAddr.trim()).withDefaultPort(defaultPort); masterAddrs.add(ServerName.valueOf(masterHostPort.toString(), ServerName.NON_STARTCODE)); } Preconditions.checkArgument(!masterAddrs.isEmpty(), "At least one master address is needed"); return masterAddrs; }
3.68
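A sketch of the accepted input format; the configuration key shown ("hbase.masters") is an assumption based on the method's use of getMasterAddr, and the hostnames are illustrative:

    Configuration conf = HBaseConfiguration.create();
    // Comma-separated host[:port]; the second entry falls back to the default master port.
    conf.set("hbase.masters", "master1.example.com:16000,master2.example.com");
    Set<ServerName> masters = MasterRegistry.parseMasterAddrs(conf);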
dubbo_ServiceDiscoveryRegistry_createServiceDiscovery
/** * Create the {@link ServiceDiscovery} from the registry {@link URL} * * @param registryURL the {@link URL} to connect the registry * @return non-null */ protected ServiceDiscovery createServiceDiscovery(URL registryURL) { return getServiceDiscovery(registryURL .addParameter(INTERFACE_KEY, ServiceDiscovery.class.getName()) .removeParameter(REGISTRY_TYPE_KEY)); }
3.68
framework_Page_addDependency
/** * Add a dependency that should be added to the current page. * <p> * These dependencies are always added before the dependencies included by * using the annotations {@link HtmlImport}, {@link JavaScript} and * {@link StyleSheet} during the same request. * <p> * Please note that these dependencies are always sent to the client side * and not filtered out by any {@link DependencyFilter}. * * @param dependency * the dependency to add * @since 8.1 */ public void addDependency(Dependency dependency) { if (pendingDependencies == null) { pendingDependencies = new ArrayList<>(); } pendingDependencies.add(dependency); }
3.68
flink_ByteBufUtils_accumulate
/** * Accumulates data from <tt>source</tt> to <tt>target</tt>. If no data has been accumulated yet * and <tt>source</tt> has enough data, <tt>source</tt> will be returned directly. Otherwise, * data will be copied into <tt>target</tt>. If the size of data copied after this operation has * reached <tt>targetAccumulationSize</tt>, <tt>target</tt> will be returned, otherwise * <tt>null</tt> will be returned to indicate more data is required. * * @param target The target buffer. * @param source The source buffer. * @param targetAccumulationSize The target size of data to accumulate. * @param accumulatedSize The size of data accumulated so far. * @return The ByteBuf containing accumulated data. If not enough data has been accumulated, * <tt>null</tt> will be returned. */ @Nullable public static ByteBuf accumulate( ByteBuf target, ByteBuf source, int targetAccumulationSize, int accumulatedSize) { if (accumulatedSize == 0 && source.readableBytes() >= targetAccumulationSize) { return source; } int copyLength = Math.min(source.readableBytes(), targetAccumulationSize - accumulatedSize); if (copyLength > 0) { target.writeBytes(source, copyLength); } if (accumulatedSize + copyLength == targetAccumulationSize) { return target; } return null; }
3.68
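A minimal sketch of the caller-side loop for accumulate, assuming Netty's Unpooled allocator and hypothetical incomingChunks/process helpers; note the accumulated size must be tracked across calls:

    int frameSize = 8;                                   // bytes needed for one frame
    ByteBuf target = Unpooled.buffer(frameSize);
    int accumulated = 0;
    for (ByteBuf chunk : incomingChunks) {
        ByteBuf full = ByteBufUtils.accumulate(target, chunk, frameSize, accumulated);
        if (full != null) {
            process(full);                               // may be 'chunk' itself or 'target'
            break;
        }
        accumulated = target.readableBytes();            // not enough data yet, keep reading
    }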
framework_LegacyWindow_getBrowserWindowWidth
/** * Gets the last known width of the browser window in which this UI resides. * * @return the browser window width in pixels * * @deprecated As of 7.0, use the similarly named api in Page instead */ @Deprecated public int getBrowserWindowWidth() { return getPage().getBrowserWindowWidth(); }
3.68
pulsar_MultiTopicsConsumerImpl_handleSubscribeOneTopicError
// Handles a failure while subscribing to a new topic: unsubscribes the partition consumers that were created successfully. private void handleSubscribeOneTopicError(String topicName, Throwable error, CompletableFuture<Void> subscribeFuture) { log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer {}", topic, topicName, error.getMessage()); client.externalExecutorProvider().getExecutor().execute(() -> { AtomicInteger toCloseNum = new AtomicInteger(0); List<ConsumerImpl> filterConsumers = consumers.values().stream().filter(consumer1 -> { String consumerTopicName = consumer1.getTopic(); if (TopicName.get(consumerTopicName).getPartitionedTopicName() .equals(TopicName.get(topicName).getPartitionedTopicName())) { toCloseNum.incrementAndGet(); return true; } else { return false; } }).collect(Collectors.toList()); if (filterConsumers.isEmpty()) { subscribeFuture.completeExceptionally(error); return; } filterConsumers.forEach(consumer2 -> { consumer2.closeAsync().whenComplete((r, ex) -> { consumer2.subscribeFuture().completeExceptionally(error); allTopicPartitionsNumber.decrementAndGet(); consumers.remove(consumer2.getTopic()); if (toCloseNum.decrementAndGet() == 0) { log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer, subscribe error: {}", topic, topicName, error.getMessage()); removeTopic(topicName); subscribeFuture.completeExceptionally(error); } return; }); }); }); }
3.68
hadoop_Utils_size
/** * Get the size of the serialized Version object. * * @return serialized size of the version object. */ public static int size() { return (Short.SIZE + Short.SIZE) / Byte.SIZE; }
3.68
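A worked check of the arithmetic above: the serialized Version is two shorts of 16 bits each, divided by 8 bits per byte.

    // (Short.SIZE + Short.SIZE) / Byte.SIZE = (16 + 16) / 8 = 4
    int bytes = (Short.SIZE + Short.SIZE) / Byte.SIZE;   // 4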
hbase_FlushNonSloppyStoresFirstPolicy_selectStoresToFlush
/** Returns the stores need to be flushed. */ @Override public Collection<HStore> selectStoresToFlush() { Collection<HStore> specificStoresToFlush = new HashSet<>(); for (HStore store : regularStores) { if (shouldFlush(store) || region.shouldFlushStore(store)) { specificStoresToFlush.add(store); } } if (!specificStoresToFlush.isEmpty()) { return specificStoresToFlush; } for (HStore store : sloppyStores) { if (shouldFlush(store)) { specificStoresToFlush.add(store); } } if (!specificStoresToFlush.isEmpty()) { return specificStoresToFlush; } return region.stores.values(); }
3.68
hadoop_FederationStateStoreFacade_getApplicationsHomeSubCluster
/** * Get the {@code ApplicationHomeSubCluster} list representing the mapping of * all submitted applications to their home sub-clusters. * * @return the mapping of all submitted applications to their home sub-clusters * @throws YarnException if the request is invalid/fails */ public List<ApplicationHomeSubCluster> getApplicationsHomeSubCluster() throws YarnException { GetApplicationsHomeSubClusterResponse response = stateStore.getApplicationsHomeSubCluster( GetApplicationsHomeSubClusterRequest.newInstance()); return response.getAppsHomeSubClusters(); }
3.68
hbase_Mutation_setCellVisibility
/** * Sets the visibility expression associated with cells in this Mutation. */ public Mutation setCellVisibility(CellVisibility expression) { this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, toCellVisibility(expression).toByteArray()); return this; }
3.68
hadoop_TwoColumnLayout_setTableStyles
/** * Sets up a table to use a consistent style. * @param html the HTML to use to render. * @param tableId the ID of the table to set styles on. * @param innerStyles any other styles to add to the table. */ protected void setTableStyles(Page.HTML<__> html, String tableId, String... innerStyles) { List<String> styles = Lists.newArrayList(); styles.add(join('#', tableId, "_paginate span {font-weight:normal}")); styles.add(join('#', tableId, " .progress {width:8em}")); styles.add(join('#', tableId, "_processing {top:-1.5em; font-size:1em;")); styles.add(" color:#000; background:#fefefe}"); for (String style : innerStyles) { styles.add(join('#', tableId, " ", style)); } html.style(styles.toArray()); }
3.68
rocketmq-connect_JdbcSourceTask_start
/** * start jdbc task */ @Override public void start(KeyValue props) { // init config config = new JdbcSourceTaskConfig(props); this.dialect = DatabaseDialectLoader.getDatabaseDialect(config); cachedConnectionProvider = connectionProvider(config.getAttempts(), config.getBackoffMs()); log.info("Using JDBC dialect {}", dialect.name()); // compute table offset Map<String, Map<String, Object>> offsetValues = SourceOffsetCompute.initOffset(config, sourceTaskContext, dialect, cachedConnectionProvider); for (String tableOrQuery : offsetValues.keySet()) { this.buildAndAddQuerier( TableLoadMode.findTableLoadModeByName(this.config.getMode()), this.config.getQuerySuffix(), this.config.getIncrementingColumnName(), this.config.getTimestampColumnNames(), this.config.getTimestampDelayIntervalMs(), this.config.getTimeZone(), tableOrQuery, offsetValues.get(tableOrQuery) ); } running.set(true); log.info("Started JDBC source task"); }
3.68
framework_VDebugWindow_getFontSize
/** * Gets the font size currently in use. * * @return the font size currently in use */ private int getFontSize() { return fontSize; }
3.68
framework_LegacyApplication_setMainWindow
/** * Sets the main window of this application. Setting window as a main window * of this application also adds the window to this application. * * @param mainWindow * the UI to set as the default window */ public void setMainWindow(LegacyWindow mainWindow) { if (this.mainWindow != null) { throw new IllegalStateException("mainWindow has already been set"); } if (mainWindow.isAttached()) { throw new IllegalStateException( "mainWindow is attached to another application"); } if (UI.getCurrent() == null) { // Assume setting a main window from Application.init if there's // no current UI -> set the main window as the current UI UI.setCurrent(mainWindow); } addWindow(mainWindow); this.mainWindow = mainWindow; }
3.68
framework_AbstractComponentConnector_isSignificantMove
// mostly copy-pasted code from VScrollTable // TODO refactor main logic to a common class private boolean isSignificantMove(TouchMoveEvent event) { if (longTouchTimer == null) { // no touch start return false; } // Calculate the distance between touch start and the current // touch // position Touch touch = event.getChangedTouches().get(0); int deltaX = touch.getClientX() - touchStartX; int deltaY = touch.getClientY() - touchStartY; int delta = deltaX * deltaX + deltaY * deltaY; // Compare to the square of the significant move threshold to // remove the need for a square root if (delta > SIGNIFICANT_MOVE_THRESHOLD * SIGNIFICANT_MOVE_THRESHOLD) { return true; } return false; }
3.68
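The comparison above avoids a square root by squaring both sides; a standalone sketch of the same trick (method name hypothetical):

    static boolean isSignificant(int dx, int dy, int thresholdPx) {
        // dx*dx + dy*dy > t*t  <=>  sqrt(dx*dx + dy*dy) > t, for non-negative t.
        return dx * dx + dy * dy > thresholdPx * thresholdPx;
    }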
hbase_RegionCoprocessorHost_postCheckAndMutate
/** * @param checkAndMutate the CheckAndMutate object * @param result the result returned by the checkAndMutate * @return true or false to return to client if default processing should be bypassed, or null * otherwise * @throws IOException if an error occurred on the coprocessor */ public CheckAndMutateResult postCheckAndMutate(CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { if (this.coprocEnvironments.isEmpty()) { return result; } return execOperationWithResult( new ObserverOperationWithResult<RegionObserver, CheckAndMutateResult>(regionObserverGetter, result) { @Override public CheckAndMutateResult call(RegionObserver observer) throws IOException { return observer.postCheckAndMutate(this, checkAndMutate, getResult()); } }); }
3.68
framework_CssLayout_readDesign
/* * (non-Javadoc) * * @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Element, * com.vaadin.ui.declarative.DesignContext) */ @Override public void readDesign(Element design, DesignContext designContext) { // process default attributes super.readDesign(design, designContext); // handle children for (Element childComponent : design.children()) { Component newChild = designContext.readDesign(childComponent); addComponent(newChild); } }
3.68
flink_TableDescriptor_comment
/** Define the comment for this table. */ public Builder comment(@Nullable String comment) { this.comment = comment; return this; }
3.68
flink_MurmurHashUtil_hashUnsafeBytesByWords
/** * Hash unsafe bytes, length must be aligned to 4 bytes. * * @param base base unsafe object * @param offset offset for unsafe object * @param lengthInBytes length in bytes * @return hash code */ public static int hashUnsafeBytesByWords(Object base, long offset, int lengthInBytes) { return hashUnsafeBytesByWords(base, offset, lengthInBytes, DEFAULT_SEED); }
3.68
hbase_LruAdaptiveBlockCache_isEnteringRun
/** * Used for testing. */ boolean isEnteringRun() { return this.enteringRun; }
3.68
framework_DefaultConnectionStateHandler_getDialogText
/** * Gets the text to show in the reconnect dialog. * * @param reconnectAttempt * The number of the current reconnection attempt * @return The text to show in the reconnect dialog */ protected String getDialogText(int reconnectAttempt) { return getConfiguration().dialogText.replace("{0}", reconnectAttempt + ""); }
3.68
hadoop_BlockBlobAppendStream_generateNewerVersionBlockId
/** * Helper method that generates a newer (4.2.0) version block ID. * @param prefix the block ID prefix * @param id the block sequence number * @return String representing the block ID generated. */ private String generateNewerVersionBlockId(String prefix, long id) { String blockIdSuffix = String.format("%06d", id); byte[] blockIdInBytes = (prefix + blockIdSuffix).getBytes(StandardCharsets.UTF_8); return new String(Base64.encodeBase64(blockIdInBytes), StandardCharsets.UTF_8); }
3.68
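A standalone sketch of the same scheme (prefix plus zero-padded counter, Base64-encoded), using java.util.Base64 in place of the commons-codec helper; the method name and example prefix are illustrative:

    static String blockId(String prefix, long id) {
        String raw = prefix + String.format("%06d", id);   // e.g. "blk-000042"
        return java.util.Base64.getEncoder()
                .encodeToString(raw.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    }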
hbase_ScannerContext_getSizeScope
/** Returns {@link LimitScope} indicating scope in which the size limit is enforced */ LimitScope getSizeScope() { return this.sizeScope; }
3.68
hbase_MetricsREST_incrementFailedDeleteRequests
/** * @param inc How much to add to failedDeleteCount. */ public void incrementFailedDeleteRequests(final int inc) { source.incrementFailedDeleteRequests(inc); }
3.68
morf_AbstractSqlDialectTest_testAlterBooleanColumn
/** * Test altering a boolean column. */ @Test public void testAlterBooleanColumn() { testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, BOOLEAN_FIELD), column(BOOLEAN_FIELD, DataType.BOOLEAN), expectedAlterTableAlterBooleanColumnStatement()); }
3.68
hudi_HoodieJavaRDD_getJavaRDD
/** * @param hoodieData {@link HoodieJavaRDD}<T> instance containing the {@link JavaRDD} of objects. * @param <T> type of object. * @return a {@link JavaRDD} of objects in type T. */ public static <T> JavaRDD<T> getJavaRDD(HoodieData<T> hoodieData) { return ((HoodieJavaRDD<T>) hoodieData).rddData; }
3.68
hbase_NewVersionBehaviorTracker_isDeleted
/** * This method is not idempotent; we save some info to judge VERSION_MASKED. * @param cell - current cell to check if deleted by a previously seen delete * @return the delete result; we don't distinguish DeleteColumn and DeleteFamily, and only return the code for column. */ @Override public DeleteResult isDeleted(Cell cell) { long duplicateMvcc = prepare(cell); for (Map.Entry<Long, DeleteVersionsNode> e : delColMap.tailMap(cell.getSequenceId()) .entrySet()) { DeleteVersionsNode node = e.getValue(); long deleteMvcc = Long.MAX_VALUE; SortedSet<Long> deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp()); if (deleteVersionMvccs != null) { SortedSet<Long> tail = deleteVersionMvccs.tailSet(cell.getSequenceId()); if (!tail.isEmpty()) { deleteMvcc = tail.first(); } } SortedMap<Long, SortedSet<Long>> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry<Long, SortedSet<Long>> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; } seg.getValue().add(cell.getSequenceId()); } if (deleteMvcc < Long.MAX_VALUE) { return DeleteResult.VERSION_DELETED; } if (cell.getTimestamp() <= node.ts) { return DeleteResult.COLUMN_DELETED; } } if (duplicateMvcc < Long.MAX_VALUE) { return DeleteResult.VERSION_MASKED; } return DeleteResult.NOT_DELETED; }
3.68
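The method leans on NavigableMap range views; a toy, HBase-free sketch of the tailMap/subMap pattern it uses (all values are made up):

    java.util.NavigableMap<Long, String> mvccMap = new java.util.TreeMap<>();
    mvccMap.put(5L, "a");
    mvccMap.put(9L, "b");
    mvccMap.put(12L, "c");
    long seqId = 6L;  // the cell's sequence id
    long upper = 10L; // min(duplicateMvcc, deleteMvcc)
    // Inclusive view of entries with keys in [seqId, upper] -- prints {9=b}
    System.out.println(mvccMap.subMap(seqId, true, upper, true));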
hbase_LockServiceClient_namespaceLock
/**
 * Create a new EntityLock object to acquire an exclusive lock on a namespace. Clients cannot
 * acquire shared locks on a namespace.
 */
public EntityLock namespaceLock(String namespace, String description, Abortable abort) {
  LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, namespace, null, null,
    description, ng.getNonceGroup(), ng.newNonce());
  return new EntityLock(conf, stub, lockRequest, abort);
}
3.68
hbase_ReplicationSourceWALReader_updateReplicationMarkerEdit
/*
 * Creates a ReplicationMarkerDescriptor with region_server_name, wal_name and offset, and sets
 * it as the cell's value.
 */
private void updateReplicationMarkerEdit(Entry entry, long offset) {
  WALEdit edit = entry.getEdit();
  // Return early if it is not a ReplicationMarker edit.
  if (!WALEdit.isReplicationMarkerEdit(edit)) {
    return;
  }
  List<Cell> cells = edit.getCells();
  Preconditions.checkArgument(cells.size() == 1, "ReplicationMarker should have only 1 cell");
  Cell cell = cells.get(0);
  // Create a descriptor with region_server_name, wal_name and offset
  WALProtos.ReplicationMarkerDescriptor.Builder builder =
    WALProtos.ReplicationMarkerDescriptor.newBuilder();
  builder.setRegionServerName(this.source.getServer().getServerName().getHostname());
  builder.setWalName(getCurrentPath().getName());
  builder.setOffset(offset);
  WALProtos.ReplicationMarkerDescriptor descriptor = builder.build();
  // Create a new KeyValue carrying the descriptor as its value.
  KeyValue kv = new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell),
    CellUtil.cloneQualifier(cell), cell.getTimestamp(), descriptor.toByteArray());
  ArrayList<Cell> newCells = new ArrayList<>();
  newCells.add(kv);
  // Replace the edit's cells with the new cell.
  edit.setCells(newCells);
}
3.68
open-banking-gateway_ContextUtil_getAndUpdateContext
/**
 * Gets and updates the context of the current execution in a single operation, with retry
 * support for optimistic-locking exceptions.
 */
public <T> void getAndUpdateContext(DelegateExecution execution, Consumer<T> contextUpdater) {
    @SuppressWarnings("unchecked")
    T ctx = (T) execution.getVariable(GlobalConst.CONTEXT);
    contextUpdater.accept(ctx);
    execution.setVariable(GlobalConst.CONTEXT, ctx);
}
3.68
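A hedged usage sketch; XyzContext and its setter are hypothetical stand-ins for whatever type is stored under GlobalConst.CONTEXT, and contextUtil is assumed to be an available instance:

    contextUtil.<XyzContext>getAndUpdateContext(
        execution,
        ctx -> ctx.setResultCode("OK") // hypothetical setter on the context type
    );

Because the read-mutate-write happens in one call, a retry wrapper can safely re-run it after an optimistic-locking failure.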
flink_RocksDBKeyedStateBackend_snapshot
/**
 * Triggers an asynchronous snapshot of the keyed state backend from RocksDB. This snapshot can
 * be canceled and is also stopped when the backend is closed through {@link #dispose()}. For
 * each backend, this method must always be called by the same thread.
 *
 * @param checkpointId The Id of the checkpoint.
 * @param timestamp The timestamp of the checkpoint.
 * @param streamFactory The factory that we can use for writing our state to streams.
 * @param checkpointOptions Options for how to perform this checkpoint.
 * @return Future to the state handle of the snapshot data.
 * @throws Exception indicating a problem in the synchronous part of the checkpoint.
 */
@Nonnull
@Override
public RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot(
        final long checkpointId,
        final long timestamp,
        @Nonnull final CheckpointStreamFactory streamFactory,
        @Nonnull CheckpointOptions checkpointOptions)
        throws Exception {
    // flush everything into db before taking a snapshot
    writeBatchWrapper.flush();

    return new SnapshotStrategyRunner<>(
                    checkpointSnapshotStrategy.getDescription(),
                    checkpointSnapshotStrategy,
                    cancelStreamRegistry,
                    ASYNCHRONOUS)
            .snapshot(checkpointId, timestamp, streamFactory, checkpointOptions);
}
3.68
hbase_Scan_getRowOffsetPerColumnFamily
/**
 * Method for retrieving the scan's offset per row per column family (#kvs to be skipped)
 * @return row offset
 */
public int getRowOffsetPerColumnFamily() {
  return this.storeOffset;
}
3.68
hbase_MovingAverage_stop
/**
 * Mark end time of an execution, and return its interval.
 * @param startTime start time of an execution
 * @return elapsed time
 */
protected long stop(long startTime) {
  return System.nanoTime() - startTime;
}
3.68
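The intended call pattern is a matching start/stop pair around the measured work, invoked from within a MovingAverage subclass; doWork() is a hypothetical placeholder:

    long begin = System.nanoTime(); // what a matching start() would capture
    doWork();                       // hypothetical measured operation
    long elapsedNanos = stop(begin);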
hbase_AbstractMemStore_maybeCloneWithAllocator
/**
 * If the segment has a memory allocator, the cell is cloned into that space and the clone is
 * returned; otherwise the given cell is returned. When a cell's size is too big (bigger than
 * maxAlloc), it is not allocated on MSLAB. Since the process of flattening to CellChunkMap
 * assumes that all cells are allocated on MSLAB, during this process the input parameter
 * forceCloneOfBigCell is set to 'true' and the cell is copied into MSLAB.
 * @param currentActive the segment to clone into
 * @param cell the cell to clone
 * @param forceCloneOfBigCell true only during the process of flattening to CellChunkMap.
 * @return either the given cell or its clone
 */
private Cell maybeCloneWithAllocator(MutableSegment currentActive, Cell cell,
  boolean forceCloneOfBigCell) {
  return currentActive.maybeCloneWithAllocator(cell, forceCloneOfBigCell);
}
3.68
hbase_RSGroupInfo_removeTable
/**
 * Remove a table
 * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
 *             the configuration of a table so this will be removed.
 */
@Deprecated
public boolean removeTable(TableName table) {
  return tables.remove(table);
}
3.68
flink_DataSet_output
/**
 * Emits a DataSet using an {@link OutputFormat}. This method adds a data sink to the program.
 * Programs may have multiple data sinks. A DataSet may also have multiple consumers (data sinks
 * or transformations) at the same time.
 *
 * @param outputFormat The OutputFormat to process the DataSet.
 * @return The DataSink that processes the DataSet.
 * @see OutputFormat
 * @see DataSink
 */
public DataSink<T> output(OutputFormat<T> outputFormat) {
    Preconditions.checkNotNull(outputFormat);

    // configure the type if needed
    if (outputFormat instanceof InputTypeConfigurable) {
        ((InputTypeConfigurable) outputFormat).setInputType(getType(), context.getConfig());
    }

    DataSink<T> sink = new DataSink<>(this, outputFormat, getType());
    this.context.registerDataSink(sink);
    return sink;
}
3.68
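A minimal sketch of wiring a sink through output(...), using Flink's stock TextOutputFormat; the output path is arbitrary:

    import org.apache.flink.api.java.ExecutionEnvironment;
    import org.apache.flink.api.java.io.TextOutputFormat;
    import org.apache.flink.core.fs.Path;

    public class OutputExample {
        public static void main(String[] args) throws Exception {
            ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
            env.fromElements("to", "be", "or", "not")
                .output(new TextOutputFormat<>(new Path("file:///tmp/words"))); // registers the sink
            env.execute("write words");
        }
    }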
dubbo_StringUtils_parseInteger
/**
 * Parse str to an Integer (if str is not a number or is negative, return 0).
 *
 * @param str a number str
 * @return positive integer or zero
 */
public static int parseInteger(String str) {
    return isNumber(str) ? Integer.parseInt(str) : 0;
}
3.68
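Behavior at a glance; the negative case assumes the isNumber check rejects a leading minus sign, as the javadoc implies:

    parseInteger("42");  // 42
    parseInteger("abc"); // 0
    parseInteger("-1");  // 0, assuming "-1" fails isNumber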
hmily_StringUtils_isBlank
/**
 * Checks whether the given CharSequence is blank, i.e. null, empty, or whitespace only.
 *
 * @param cs the CharSequence to check
 * @return true if cs is blank
 */
public static boolean isBlank(final CharSequence cs) {
    int strLen;
    if (cs == null || (strLen = cs.length()) == 0) {
        return true;
    }
    for (int i = 0; i < strLen; i++) {
        if (!Character.isWhitespace(cs.charAt(i))) {
            return false;
        }
    }
    return true;
}
3.68
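Behavior at a glance:

    isBlank(null);    // true
    isBlank("");      // true
    isBlank(" \t\n"); // true
    isBlank(" a ");   // false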