Columns:
name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
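Each row below repeats the three-field pattern described above: a snippet name, the flattened Java source, and its score. As a minimal, hypothetical sketch of that schema (the record and method names here are illustrative assumptions, not part of the exported data), a row could be modeled and filtered by score like this in Java:

import java.util.List;
import java.util.stream.Collectors;

// Hypothetical in-memory model of one exported row: snippet name, flattened source, quality score.
record CodeSnippetRow(String name, String codeSnippet, double score) {}

public class SnippetFilter {

    // Keep only rows whose score is at least the given threshold
    // (scores in this export range from 3.26 to 3.68).
    static List<CodeSnippetRow> withMinScore(List<CodeSnippetRow> rows, double minScore) {
        return rows.stream()
                .filter(row -> row.score() >= minScore)
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<CodeSnippetRow> rows = List.of(
                new CodeSnippetRow(
                        "flink_BlobServer_getReadWriteLock",
                        "/** Returns the lock used to guard file accesses. */ ReadWriteLock getReadWriteLock() { return readWriteLock; }",
                        3.68));
        System.out.println(withMinScore(rows, 3.5).size()); // prints 1
    }
}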
morf_SqlDialect_getNonKeyFieldsFromMergeStatement
/** * Returns the non key fields from a merge statement. * * @param statement a merge statement * @return the non key fields */ protected Iterable<AliasedField> getNonKeyFieldsFromMergeStatement(MergeStatement statement) { Set<String> tableUniqueKey = statement.getTableUniqueKey().stream() .map(AliasedField::getImpliedName) .collect(Collectors.toSet()); return Iterables.filter( statement.getSelectStatement().getFields(), input -> !tableUniqueKey.contains(input.getImpliedName()) ); }
3.68
morf_AbstractSqlDialectTest_expectedTrim
/** * @return The expected SQL for a Trim */ protected String expectedTrim() { return "SELECT TRIM(field1) FROM " + tableName("schedule"); }
3.68
hbase_JVM_getFreeMemory
/** * Return the physical free memory as reported by the OS, rather than the JVM free memory (which is not very * useful because it depends on the GC), since the OS value gives a better hint of whether the machine is * overloaded. */ public long getFreeMemory() { if (ibmvendor) { return 0; } Long r = runUnixMXBeanMethod("getFreePhysicalMemorySize"); return (r != null ? r : -1); }
3.68
pulsar_PulsarKafkaSinkTaskContext_currentOffset
// for tests private Long currentOffset(TopicPartition topicPartition) { Long offset = currentOffsets.computeIfAbsent(topicPartition, kv -> { List<ByteBuffer> req = Lists.newLinkedList(); ByteBuffer key = topicPartitionAsKey(topicPartition); req.add(key); try { Map<ByteBuffer, ByteBuffer> result = offsetStore.get(req).get(); if (result != null && result.size() != 0) { Optional<ByteBuffer> val = result.entrySet().stream() .filter(entry -> entry.getKey().equals(key)) .findFirst().map(entry -> entry.getValue()); if (val.isPresent()) { long received = val.get().getLong(); if (log.isDebugEnabled()) { log.debug("read initial offset for {} == {}", topicPartition, received); } return received; } } return -1L; } catch (InterruptedException e) { Thread.currentThread().interrupt(); log.error("error getting initial state of {}", topicPartition, e); throw new RuntimeException("error getting initial state of " + topicPartition, e); } catch (ExecutionException e) { log.error("error getting initial state of {}", topicPartition, e); throw new RuntimeException("error getting initial state of " + topicPartition, e); } }); return offset; }
3.68
hmily_AbstractHmilySQLParserExecutor_generateHmilySelectStatement
/** * Generate Hmily select statement. * * @param selectStatement select statement * @param hmilySelectStatement hmily select statement * @return hmily select statement */ public HmilySelectStatement generateHmilySelectStatement(final SelectStatement selectStatement, final HmilySelectStatement hmilySelectStatement) { return SelectStatementAssembler.assembleHmilySelectStatement(selectStatement, hmilySelectStatement); }
3.68
hadoop_ManifestCommitter_getOrCreateSuccessData
/** * Get the manifest success data for this job; creating on demand if needed. * @param committerConfig source config. * @return the current {@link #successReport} value; never null. */ private ManifestSuccessData getOrCreateSuccessData( ManifestCommitterConfig committerConfig) { if (successReport == null) { successReport = createManifestOutcome( committerConfig.createStageConfig(), activeStage); } return successReport; }
3.68
pulsar_ManagedLedgerConfig_setMaxEntriesPerLedger
/** * @param maxEntriesPerLedger * the maxEntriesPerLedger to set */ public ManagedLedgerConfig setMaxEntriesPerLedger(int maxEntriesPerLedger) { this.maxEntriesPerLedger = maxEntriesPerLedger; return this; }
3.68
framework_FlyweightRow_getUnattachedCells
/** * Returns a subrange of unattached flyweight cells. Unattached cells do not * have {@link FlyweightCell#getElement() elements} associated. Note that * FlyweightRow does not keep track of whether cells in actuality have * corresponding DOM elements or not; it is the caller's responsibility to * invoke this method with correct parameters. * <p> * Precondition: the range [offset, offset + numberOfCells) must be valid * * @param offset * the index of the first cell to return * @param numberOfCells * the number of cells to return * @return an iterable of flyweight cells */ public Iterable<FlyweightCell> getUnattachedCells(final int offset, final int numberOfCells) { assertSetup(); assert offset >= 0 && offset + numberOfCells <= cells .size() : "Invalid range of cells"; return () -> CellIterator .unattached(cells.subList(offset, offset + numberOfCells)); }
3.68
flink_BlobServer_getReadWriteLock
/** Returns the lock used to guard file accesses. */ ReadWriteLock getReadWriteLock() { return readWriteLock; }
3.68
pulsar_DLInputStream_readAsync
/** * Read data to the output stream. * * @param outputStream the output stream the data is written to * @return a future that completes with this stream once the data has been read */ CompletableFuture<DLInputStream> readAsync(OutputStream outputStream) { CompletableFuture<Void> outputFuture = new CompletableFuture<>(); read(outputStream, outputFuture, 10); return outputFuture.thenApply(ignore -> this); }
3.68
flink_MiniCluster_getHaLeadershipControl
/** * Returns {@link HaLeadershipControl} if enabled. * * <p>{@link HaLeadershipControl} allows granting and revoking leadership of HA components, e.g. * JobManager. The method returns {@link Optional#empty()} if the control is not enabled in * {@link MiniClusterConfiguration}. * * <p>Enabling this feature disables the {@link HighAvailabilityOptions#HA_MODE} option. */ public Optional<HaLeadershipControl> getHaLeadershipControl() { synchronized (lock) { return haServices instanceof HaLeadershipControl ? Optional.of((HaLeadershipControl) haServices) : Optional.empty(); } }
3.68
dubbo_DynamicConfiguration_addListener
/** * {@link #addListener(String, String, ConfigurationListener)} * * @param key the key to represent a configuration * @param listener configuration listener */ default void addListener(String key, ConfigurationListener listener) { addListener(key, getDefaultGroup(), listener); }
3.68
framework_RendererCellReference_getColSpan
/** * Gets the colspan attribute of the element of this cell. * * @return the number of columns that the cell should span */ public int getColSpan() { return cell.getColSpan(); }
3.68
querydsl_AbstractLuceneQuery_distinct
/** * Add a DuplicateFilter for the field of the given property path * * @param property distinct property * @return the current object */ public Q distinct(Path<?> property) { return filter(new DuplicateFilter(serializer.toField(property))); }
3.68
hbase_ActiveMasterManager_getBackupMasters
/** Returns list of registered backup masters. */ public List<ServerName> getBackupMasters() { return backupMasters; }
3.68
Activiti_DelegateHelper_isExecutingExecutionListener
/** * Returns whether or not the provided execution is being used for executing an {@link ExecutionListener}. */ public static boolean isExecutingExecutionListener(DelegateExecution execution) { return execution.getCurrentActivitiListener() != null; }
3.68
framework_AbstractBeanContainer_addItem
/** * Adds the bean to the Container. * * Note: the behavior of this method changed in Vaadin 6.6 - now items are * added at the very end of the unfiltered container and not after the last * visible item if filtering is used. * * @see Container#addItem(Object) */ protected BeanItem<BEANTYPE> addItem(IDTYPE itemId, BEANTYPE bean) { if (!validateBean(bean)) { return null; } return internalAddItemAtEnd(itemId, createBeanItem(bean), true); }
3.68
hbase_InnerStoreCellComparator_getInnerStoreCellComparator
/** * Utility method that makes a guess at the comparator to use based on the passed tableName. Use only in * extreme cases when no comparator is specified. * @return CellComparator to use based on the {@code tableName} passed. */ public static CellComparator getInnerStoreCellComparator(byte[] tableName) { return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) ? MetaCellComparator.META_COMPARATOR : InnerStoreCellComparator.INNER_STORE_COMPARATOR; }
3.68
hudi_HoodieTableMetaClient_getCommitsTimeline
/** * Get the commit timeline visible for this table. */ public HoodieTimeline getCommitsTimeline() { switch (this.getTableType()) { case COPY_ON_WRITE: return getActiveTimeline().getCommitTimeline(); case MERGE_ON_READ: // We need to include the parquet files written out in delta commits // Include commit action to be able to start doing a MOR over a COW table - no // migration required return getActiveTimeline().getCommitsTimeline(); default: throw new HoodieException("Unsupported table type :" + this.getTableType()); } }
3.68
framework_FocusableHTML_setFocus
/** * Sets/Removes the keyboard focus to the panel. * * @param focus * If set to true then the focus is moved to the panel, if set to * false the focus is removed */ public void setFocus(boolean focus) { if (focus) { FocusImpl.getFocusImplForPanel().focus(getElement()); } else { FocusImpl.getFocusImplForPanel().blur(getElement()); } }
3.68
shardingsphere-elasticjob_JobFacade_failoverIfNecessary
/** * Failover if necessary. */ public void failoverIfNecessary() { if (configService.load(true).isFailover()) { failoverService.failoverIfNecessary(); } }
3.68
framework_TreeGrid_collapseRecursively
/** * Collapse the given items and their children recursively until the given * depth. * <p> * {@code depth} describes the maximum distance between a given item and its * descendant, meaning that {@code collapseRecursively(items, 0)} collapses * only the given items while {@code collapseRecursively(items, 2)} * collapses the given items as well as their children and grandchildren. * <p> * This method will <i>not</i> fire events for collapsed nodes. * * @param items * the items to collapse recursively * @param depth * the maximum depth of recursion * @since 8.4 */ public void collapseRecursively(Stream<T> items, int depth) { if (depth < 0) { return; } HierarchicalDataCommunicator<T> communicator = getDataCommunicator(); items.forEach(item -> { if (communicator.hasChildren(item)) { collapseRecursively( getDataProvider().fetchChildren( new HierarchicalQuery<>(null, item)), depth - 1); communicator.collapse(item, false); } }); getDataProvider().refreshAll(); }
3.68
graphhopper_VectorTile_addAllLayers
/** * <code>repeated .vector_tile.Tile.Layer layers = 3;</code> */ public Builder addAllLayers( java.lang.Iterable<? extends vector_tile.VectorTile.Tile.Layer> values) { if (layersBuilder_ == null) { ensureLayersIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, layers_); onChanged(); } else { layersBuilder_.addAllMessages(values); } return this; }
3.68
pulsar_LoadManagerShared_fillNamespaceToBundlesMap
/** * Using the given bundles, populate the namespace to bundle range map. * * @param bundles * Bundles with which to populate. * @param target * Map to fill. */ public static void fillNamespaceToBundlesMap(final Set<String> bundles, final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> target) { bundles.forEach(bundleName -> { final String namespaceName = getNamespaceNameFromBundleName(bundleName); final String bundleRange = getBundleRangeFromBundleName(bundleName); target.computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build()) .add(bundleRange); }); }
3.68
flink_FutureUtils_isCompletedNormally
/** @return true if future has completed normally, false otherwise. */ public static boolean isCompletedNormally(CompletableFuture<?> future) { return future.isDone() && !future.isCompletedExceptionally(); }
3.68
framework_GridLayout_setMargin
/* * (non-Javadoc) * * @see com.vaadin.ui.Layout.MarginHandler#setMargin(com.vaadin.shared.ui. * MarginInfo ) */ @Override @SuppressWarnings("deprecation") public void setMargin(MarginInfo marginInfo) { getState().marginsBitmask = marginInfo.getBitMask(); }
3.68
hadoop_CloseableReferenceCount_unreferenceCheckClosed
/** * Decrement the reference count, checking to make sure that the * CloseableReferenceCount is not closed. * * @throws AsynchronousCloseException If the status is closed. */ public void unreferenceCheckClosed() throws ClosedChannelException { int newVal = status.decrementAndGet(); if ((newVal & STATUS_CLOSED_MASK) != 0) { throw new AsynchronousCloseException(); } }
3.68
flink_KeyGroupsStateHandle_getIntersection
/** * @param keyGroupRange a key group range to intersect. * @return key-group state over a range that is the intersection between this handle's key-group * range and the provided key-group range. */ @Override public KeyGroupsStateHandle getIntersection(KeyGroupRange keyGroupRange) { KeyGroupRangeOffsets offsets = groupRangeOffsets.getIntersection(keyGroupRange); if (offsets.getKeyGroupRange().getNumberOfKeyGroups() <= 0) { return null; } return new KeyGroupsStateHandle(offsets, stateHandle, stateHandleId); }
3.68
hbase_ResponseConverter_buildHasPermissionResponse
/** * Builds a protocol buffer HasPermissionResponse */ public static HasPermissionResponse buildHasPermissionResponse(boolean hasPermission) { HasPermissionResponse.Builder builder = HasPermissionResponse.newBuilder(); builder.setHasPermission(hasPermission); return builder.build(); }
3.68
hbase_ByteBuff_compareTo
/** * Compares two ByteBuffs. * @param buf1 the first ByteBuff * @param o1 the offset in the first ByteBuff from where the compare has to happen * @param len1 the length in the first ByteBuff up to which the compare has to happen * @param buf2 the second ByteBuff * @param o2 the offset in the second ByteBuff from where the compare has to happen * @param len2 the length in the second ByteBuff up to which the compare has to happen * @return Positive if buf1 is bigger than buf2, 0 if they are equal, and negative if buf1 is * smaller than buf2. */ public static int compareTo(ByteBuff buf1, int o1, int len1, ByteBuff buf2, int o2, int len2) { if (buf1.hasArray() && buf2.hasArray()) { return Bytes.compareTo(buf1.array(), buf1.arrayOffset() + o1, len1, buf2.array(), buf2.arrayOffset() + o2, len2); } int end1 = o1 + len1; int end2 = o2 + len2; for (int i = o1, j = o2; i < end1 && j < end2; i++, j++) { int a = buf1.get(i) & 0xFF; int b = buf2.get(j) & 0xFF; if (a != b) { return a - b; } } return len1 - len2; }
3.68
hadoop_IdentityTransformer_transformAclEntriesForGetRequest
/** * Perform Identity transformation when calling GetAclStatus() * If the AclEntry type is a user or group, and its name is one of the following: * <pre> * 1. $superuser: * by default it will be transformed to local user/group, this can be disabled by setting * "fs.azure.identity.transformer.skip.superuser.replacement" to true. * * 2. User principal id: * can be transformed to localUser/localGroup, if this principal id matches the principal id set in * "fs.azure.identity.transformer.service.principal.id" and localIdentity is stated in * "fs.azure.identity.transformer.service.principal.substitution.list" * * 3. User principal name (UPN): * can be transformed to a short name(local identity) if originalIdentity is owner name, and * "fs.azure.identity.transformer.enable.short.name" is enabled. * </pre> * @param aclEntries list of AclEntry * @param localUser local user name * @param localGroup local primary group * */ public void transformAclEntriesForGetRequest(final List<AclEntry> aclEntries, String localUser, String localGroup) throws IOException { if (skipUserIdentityReplacement) { return; } for (int i = 0; i < aclEntries.size(); i++) { AclEntry aclEntry = aclEntries.get(i); String name = aclEntry.getName(); String transformedName = name; if (name == null || name.isEmpty() || aclEntry.getType().equals(AclEntryType.OTHER) || aclEntry.getType().equals(AclEntryType.MASK)) { continue; } // when type of aclEntry is user or group if (aclEntry.getType().equals(AclEntryType.USER)) { transformedName = transformIdentityForGetRequest(name, true, localUser); } else if (aclEntry.getType().equals(AclEntryType.GROUP)) { transformedName = transformIdentityForGetRequest(name, false, localGroup); } // Avoid unnecessary new AclEntry allocation if (transformedName.equals(name)) { continue; } AclEntry.Builder aclEntryBuilder = new AclEntry.Builder(); aclEntryBuilder.setType(aclEntry.getType()); aclEntryBuilder.setName(transformedName); aclEntryBuilder.setScope(aclEntry.getScope()); aclEntryBuilder.setPermission(aclEntry.getPermission()); // Replace the original AclEntry aclEntries.set(i, aclEntryBuilder.build()); } }
3.68
hbase_TableOutputFormat_close
/** * Closes the writer, in this case flushing table commits. * @param context The context. * @throws IOException When closing the writer fails. * @see RecordWriter#close(TaskAttemptContext) */ @Override public void close(TaskAttemptContext context) throws IOException { try { if (mutator != null) { mutator.close(); } } finally { if (connection != null) { connection.close(); } } }
3.68
hadoop_TimelineEntityGroupId_getApplicationId
/** * Get the <code>ApplicationId</code> of the * <code>TimelineEntityGroupId</code>. * * @return <code>ApplicationId</code> of the * <code>TimelineEntityGroupId</code> */ public ApplicationId getApplicationId() { return this.applicationId; }
3.68
hbase_BaseEnvironment_getHBaseVersion
/** Returns the HBase release */ @Override public String getHBaseVersion() { return VersionInfo.getVersion(); }
3.68
morf_ExecuteStatement_accept
/** * {@inheritDoc} * * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
hbase_Compressor_writeCompressed
/** * Compresses and writes an array to a DataOutput * @param data the array to write. * @param out the DataOutput to write into * @param dict the dictionary to use for compression */ @Deprecated static void writeCompressed(byte[] data, int offset, int length, DataOutput out, Dictionary dict) throws IOException { short dictIdx = Dictionary.NOT_IN_DICTIONARY; if (dict != null) { dictIdx = dict.findEntry(data, offset, length); } if (dictIdx == Dictionary.NOT_IN_DICTIONARY) { // not in dict out.writeByte(Dictionary.NOT_IN_DICTIONARY); WritableUtils.writeVInt(out, length); out.write(data, offset, length); } else { out.writeShort(dictIdx); } }
3.68
hadoop_FederationRegistryClient_listDirRegistry
/** * List the sub directories in the given directory. */ private List<String> listDirRegistry(final RegistryOperations registryImpl, UserGroupInformation ugi, final String key, final boolean throwIfFails) throws YarnException { List<String> result = ugi.doAs((PrivilegedAction<List<String>>) () -> { try { return registryImpl.list(key); } catch (Throwable e) { if (throwIfFails) { LOG.error("Registry list key {} failed.", key, e); } } return null; }); if (result == null && throwIfFails) { throw new YarnException("Registry list key " + key + " failed"); } return result; }
3.68
hbase_MetricsStochasticBalancerSourceImpl_updateStochasticCost
/** * Reports stochastic load balancer costs to JMX */ @Override public void updateStochasticCost(String tableName, String costFunctionName, String functionDesc, Double cost) { if (tableName == null || costFunctionName == null || cost == null) { return; } if (functionDesc != null) { costFunctionDescs.put(costFunctionName, functionDesc); } synchronized (stochasticCosts) { Map<String, Double> costs = stochasticCosts.get(tableName); if (costs == null) { costs = new ConcurrentHashMap<>(); } costs.put(costFunctionName, cost); stochasticCosts.put(tableName, costs); } }
3.68
pulsar_BrokerService_getAutoSubscriptionCreationOverride
/** * @deprecated Avoid using the deprecated, blocking method * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)}; * use #{@link BrokerService#isAllowAutoSubscriptionCreationAsync(TopicName)} instead. */ @Deprecated private AutoSubscriptionCreationOverride getAutoSubscriptionCreationOverride(final TopicName topicName) { Optional<TopicPolicies> topicPolicies = getTopicPolicies(topicName); if (topicPolicies.isPresent() && topicPolicies.get().getAutoSubscriptionCreationOverride() != null) { return topicPolicies.get().getAutoSubscriptionCreationOverride(); } Optional<Policies> policies = pulsar.getPulsarResources().getNamespaceResources().getPoliciesIfCached(topicName.getNamespaceObject()); // If namespace policies have the field set, it will override the broker-level setting if (policies.isPresent() && policies.get().autoSubscriptionCreationOverride != null) { return policies.get().autoSubscriptionCreationOverride; } log.debug("No autoSubscriptionCreateOverride policy found for {}", topicName); return null; }
3.68
flink_HsFileDataManager_registerNewConsumer
/** This method is only called by the result partition to create a subpartitionFileReader. */ public HsDataView registerNewConsumer( int subpartitionId, HsConsumerId consumerId, HsSubpartitionConsumerInternalOperations operation) throws IOException { synchronized (lock) { checkState(!isReleased, "HsFileDataManager is already released."); lazyInitialize(); HsSubpartitionFileReader subpartitionReader = fileReaderFactory.createFileReader( subpartitionId, consumerId, dataFileChannel, operation, dataIndex, hybridShuffleConfiguration.getMaxBuffersReadAhead(), this::releaseSubpartitionReader, headerBuf); allReaders.add(subpartitionReader); mayTriggerReading(); return subpartitionReader; } }
3.68
hibernate-validator_NotBlankValidator_isValid
/** * Checks that the character sequence is not {@code null} nor empty after removing any leading or trailing * whitespace. * * @param charSequence the character sequence to validate * @param constraintValidatorContext context in which the constraint is evaluated * @return returns {@code true} if the string is not {@code null} and the length of the trimmed * {@code charSequence} is strictly superior to 0, {@code false} otherwise */ @Override public boolean isValid(CharSequence charSequence, ConstraintValidatorContext constraintValidatorContext) { if ( charSequence == null ) { return false; } return charSequence.toString().trim().length() > 0; }
3.68
streampipes_NumWordsRulesExtractor_getInstance
/** * Returns the singleton instance for {@link NumWordsRulesExtractor}. */ public static NumWordsRulesExtractor getInstance() { return INSTANCE; }
3.68
framework_FieldBinder_asFieldName
/** * Converts the given identifier to a valid field name by stripping away * illegal characters and setting the first letter of the name to lower case. * * @param identifier * the identifier to be converted to a field name * @return the field name corresponding to the identifier */ private static String asFieldName(String identifier) { if (identifier == null) { return ""; } StringBuilder result = new StringBuilder(); for (int i = 0; i < identifier.length(); i++) { char character = identifier.charAt(i); if (Character.isJavaIdentifierPart(character)) { result.append(character); } } // lowercase first letter if (result.length() != 0 && Character.isLetter(result.charAt(0))) { result.setCharAt(0, Character.toLowerCase(result.charAt(0))); } return result.toString(); }
3.68
hadoop_ProxyUtils_notFound
/** * Output 404 with appropriate message. * @param resp the http response. * @param message the message to include on the page. * @throws IOException on any error. */ public static void notFound(HttpServletResponse resp, String message) throws IOException { resp.setStatus(HttpServletResponse.SC_NOT_FOUND); resp.setContentType(MimeType.HTML); Page p = new Page(resp.getWriter()); p.html(). h1(message). __(); }
3.68
flink_LogicalTableScan_explainTerms
// BEGIN FLINK MODIFICATION // The {@link #explainTerms} method should consider hints due to CALCITE-4581. // This file should be removed once CALCITE-4581 is fixed. @Override public RelWriter explainTerms(RelWriter pw) { return super.explainTerms(pw).itemIf("hints", getHints(), !getHints().isEmpty()); }
3.68
shardingsphere-elasticjob_QueryParameterMap_getFirst
/** * Get the first from values. * * @param parameterName parameter name * @return first value */ public String getFirst(final String parameterName) { String firstValue = null; List<String> values = queryMap.get(parameterName); if (values != null && !values.isEmpty()) { firstValue = values.get(0); } return firstValue; }
3.68
pulsar_LoadSimulationController_checkAppArgs
// Check that the expected number of application arguments matches the // actual number of application arguments. private boolean checkAppArgs(final int numAppArgs, final int numRequired) { if (numAppArgs != numRequired) { log.info("ERROR: Wrong number of application arguments (found {}, required {})", numAppArgs, numRequired); return false; } return true; }
3.68
hbase_FlushSnapshotSubprocedure_insideBarrier
/** * Do a flush snapshot of every region of the target table on this region server. */ @Override public byte[] insideBarrier() throws ForeignException { flushSnapshot(); return new byte[0]; }
3.68
framework_AbstractClickEventHandler_hasEventListener
/** * Checks if there is a server side event listener registered for clicks. * * @return true if there is a server side event listener registered, false * otherwise */ public boolean hasEventListener() { return connector.hasEventListener(clickEventIdentifier); }
3.68
flink_UniqueConstraint_primaryKey
/** Creates a non-enforced {@link ConstraintType#PRIMARY_KEY} constraint. */ public static UniqueConstraint primaryKey(String name, List<String> columns) { return new UniqueConstraint(name, false, ConstraintType.PRIMARY_KEY, columns); }
3.68
morf_SqlServerDialect_buildPrimaryKeyConstraint
/** * @param tableName Name of the table. * @param primaryKeys List of the primary keys on the table. */ private String buildPrimaryKeyConstraint(String tableName, List<String> primaryKeys) { StringBuilder pkConstraint = new StringBuilder(); pkConstraint.append("CONSTRAINT ["); pkConstraint.append(undecorateName(tableName)); pkConstraint.append("_PK] PRIMARY KEY (["); pkConstraint.append(Joiner.on("], [").join(primaryKeys)); pkConstraint.append("])"); return pkConstraint.toString(); }
3.68
flink_StatePartitionStreamProvider_getStream
/** Returns a stream with the data of one state partition. */ public InputStream getStream() throws IOException { if (creationException != null) { throw new IOException(creationException); } return stream; }
3.68
hadoop_MultipleOutputFormat_generateLeafFileName
/** * Generate the leaf name for the output file name. The default behavior does * not change the leaf file name (such as part-00000) * * @param name * the leaf file name for the output file * @return the given leaf file name */ protected String generateLeafFileName(String name) { return name; }
3.68
flink_TGetQueryIdResp_isSet
/** * Returns true if field corresponding to fieldID is set (has been assigned a value) and false * otherwise */ public boolean isSet(_Fields field) { if (field == null) { throw new java.lang.IllegalArgumentException(); } switch (field) { case QUERY_ID: return isSetQueryId(); } throw new java.lang.IllegalStateException(); }
3.68
flink_ChangelogTruncateHelper_checkpointSubsumed
/** Handle checkpoint subsumption, potentially {@link #truncate() truncating} the changelog. */ public void checkpointSubsumed(long checkpointId) { SequenceNumber sqn = checkpointedUpTo.get(checkpointId); LOG.debug("checkpoint {} subsumed, max sqn: {}", checkpointId, sqn); if (sqn != null) { subsumedUpTo = sqn; checkpointedUpTo.headMap(checkpointId, true).clear(); truncate(); } }
3.68
hbase_FuzzyRowFilter_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the filter that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) { return true; } if (!(o instanceof FuzzyRowFilter)) { return false; } FuzzyRowFilter other = (FuzzyRowFilter) o; if (this.fuzzyKeysData.size() != other.fuzzyKeysData.size()) return false; for (int i = 0; i < fuzzyKeysData.size(); ++i) { Pair<byte[], byte[]> thisData = this.fuzzyKeysData.get(i); Pair<byte[], byte[]> otherData = other.fuzzyKeysData.get(i); if ( !(Bytes.equals(thisData.getFirst(), otherData.getFirst()) && Bytes.equals(thisData.getSecond(), otherData.getSecond())) ) { return false; } } return true; }
3.68
hudi_HoodieTable_deleteMetadataIndexIfNecessary
/** * Deletes the metadata partition if the writer disables any metadata index. */ public void deleteMetadataIndexIfNecessary() { Stream.of(MetadataPartitionType.values()).forEach(partitionType -> { if (shouldDeleteMetadataPartition(partitionType)) { try { LOG.info("Deleting metadata partition because it is disabled in writer: " + partitionType.name()); if (metadataPartitionExists(metaClient.getBasePath(), context, partitionType)) { deleteMetadataPartition(metaClient.getBasePath(), context, partitionType); } clearMetadataTablePartitionsConfig(Option.of(partitionType), false); } catch (HoodieMetadataException e) { throw new HoodieException("Failed to delete metadata partition: " + partitionType.name(), e); } } }); }
3.68
flink_EndOfData_write
// // These methods are inherited from the generic serialization of AbstractEvent // but would require the CheckpointBarrier to be mutable. Since all serialization // for events goes through the EventSerializer class, which has special serialization // for the CheckpointBarrier, we don't need these methods // @Override public void write(DataOutputView out) throws IOException { throw new UnsupportedOperationException("This method should never be called"); }
3.68
flink_Tuple20_setFields
/** * Sets new values to all fields of the tuple. * * @param f0 The value for field 0 * @param f1 The value for field 1 * @param f2 The value for field 2 * @param f3 The value for field 3 * @param f4 The value for field 4 * @param f5 The value for field 5 * @param f6 The value for field 6 * @param f7 The value for field 7 * @param f8 The value for field 8 * @param f9 The value for field 9 * @param f10 The value for field 10 * @param f11 The value for field 11 * @param f12 The value for field 12 * @param f13 The value for field 13 * @param f14 The value for field 14 * @param f15 The value for field 15 * @param f16 The value for field 16 * @param f17 The value for field 17 * @param f18 The value for field 18 * @param f19 The value for field 19 */ public void setFields( T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8; this.f9 = f9; this.f10 = f10; this.f11 = f11; this.f12 = f12; this.f13 = f13; this.f14 = f14; this.f15 = f15; this.f16 = f16; this.f17 = f17; this.f18 = f18; this.f19 = f19; }
3.68
graphhopper_GTFSFeed_clone
/** * Cloning can be useful when you want to make only a few modifications to an existing feed. * Keep in mind that this is a shallow copy, so you'll have to create new maps in the clone for tables you want * to modify. */ @Override public GTFSFeed clone() { try { return (GTFSFeed) super.clone(); } catch (CloneNotSupportedException e) { throw new RuntimeException(e); } }
3.68
hadoop_AbstractRMAdminRequestInterceptor_getConf
/** * Gets the {@link Configuration}. */ @Override public Configuration getConf() { return this.conf; }
3.68
hadoop_AuditSpan_close
/** * Close calls {@link #deactivate()}; subclasses may override * but the audit manager's wrapping span will always relay to * {@link #deactivate()} rather * than call this method on the wrapped span. */ default void close() { deactivate(); }
3.68
flink_FlinkJoinToMultiJoinRule_shiftRightFilter
/** * Shifts a filter originating from the right child of the LogicalJoin to the right, to reflect * the filter now being applied on the resulting MultiJoin. * * @param joinRel the original LogicalJoin * @param left the left child of the LogicalJoin * @param right the right child of the LogicalJoin * @param rightFilter the filter originating from the right child * @return the adjusted right filter */ private RexNode shiftRightFilter( Join joinRel, RelNode left, MultiJoin right, RexNode rightFilter) { if (rightFilter == null) { return null; } int nFieldsOnLeft = left.getRowType().getFieldList().size(); int nFieldsOnRight = right.getRowType().getFieldList().size(); int[] adjustments = new int[nFieldsOnRight]; for (int i = 0; i < nFieldsOnRight; i++) { adjustments[i] = nFieldsOnLeft; } rightFilter = rightFilter.accept( new RelOptUtil.RexInputConverter( joinRel.getCluster().getRexBuilder(), right.getRowType().getFieldList(), joinRel.getRowType().getFieldList(), adjustments)); return rightFilter; }
3.68
framework_PropertyFormatter_setPropertyDataSource
/** * Sets the specified Property as the data source for the formatter. * * * <p> * Remember that new data sources getValue() must return objects that are * compatible with parse() and format() methods. * </p> * * @param newDataSource * the new data source Property. */ @Override public void setPropertyDataSource(Property newDataSource) { boolean readOnly = false; String prevValue = null; if (dataSource != null) { if (dataSource instanceof Property.ValueChangeNotifier) { ((Property.ValueChangeNotifier) dataSource) .removeListener(this); } if (dataSource instanceof Property.ReadOnlyStatusChangeListener) { ((Property.ReadOnlyStatusChangeNotifier) dataSource) .removeListener(this); } readOnly = isReadOnly(); prevValue = getValue(); } dataSource = newDataSource; if (dataSource != null) { if (dataSource instanceof Property.ValueChangeNotifier) { ((Property.ValueChangeNotifier) dataSource).addListener(this); } if (dataSource instanceof Property.ReadOnlyStatusChangeListener) { ((Property.ReadOnlyStatusChangeNotifier) dataSource) .addListener(this); } } if (isReadOnly() != readOnly) { fireReadOnlyStatusChange(); } String newVal = getValue(); if ((prevValue == null && newVal != null) || (prevValue != null && !prevValue.equals(newVal))) { fireValueChange(); } }
3.68
framework_VCalendarPanel_handleNavigationYearMode
/** * Handles the keyboard navigation when the resolution is set to years. * * @param keycode * The keycode to process * @param ctrl * Is ctrl pressed? * @param shift * is shift pressed * @return Returns true if the keycode was processed, else false */ protected boolean handleNavigationYearMode(int keycode, boolean ctrl, boolean shift) { // Ctrl and Shift selection not supported if (ctrl || shift) { return false; } else if (keycode == getPreviousKey()) { focusNextYear(10); // Add 10 years return true; } else if (keycode == getForwardKey()) { focusNextYear(1); // Add 1 year return true; } else if (keycode == getNextKey()) { focusPreviousYear(10); // Subtract 10 years return true; } else if (keycode == getBackwardKey()) { focusPreviousYear(1); // Subtract 1 year return true; } else if (keycode == getSelectKey()) { value = (Date) focusedDate.clone(); onSubmit(); return true; } else if (keycode == getResetKey()) { // Restore showing value the selected value focusedDate.setTime(value.getTime()); renderCalendar(); return true; } else if (keycode == getCloseKey()) { // TODO fire listener, on users responsibility?? onCancel(); return true; } return false; }
3.68
hbase_Table_mutateRow
/** * Performs multiple mutations atomically on a single row. Currently {@link Put} and * {@link Delete} are supported. * @param rm object that specifies the set of mutations to perform atomically * @return results of Increment/Append operations * @throws IOException if a remote or network exception occurs. */ default Result mutateRow(final RowMutations rm) throws IOException { throw new NotImplementedException("Add an implementation!"); }
3.68
framework_CalendarWeekDropHandler_dragLeave
/* * (non-Javadoc) * * @see * com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragLeave(com * .vaadin.terminal.gwt.client.ui.dd.VDragEvent) */ @Override public void dragLeave(VDragEvent drag) { deEmphasis(); super.dragLeave(drag); }
3.68
hadoop_DeSelectFields_toString
/** * use literals as toString. * @return the literals of this type. */ @Override public String toString() { return literals; }
3.68
flink_UserDefinedFunctionHelper_validateClassForRuntime
/** * Validates a {@link UserDefinedFunction} class for usage in the runtime. * * <p>Note: This is for the final validation when actual {@link DataType}s for arguments and * result are known. */ public static void validateClassForRuntime( Class<? extends UserDefinedFunction> functionClass, String methodName, Class<?>[] argumentClasses, Class<?> outputClass, String functionName) { final List<Method> methods = ExtractionUtils.collectMethods(functionClass, methodName); // verifies regular JVM calling semantics final boolean isMatching = methods.stream() .anyMatch( method -> ExtractionUtils.isInvokable(method, argumentClasses) && ExtractionUtils.isAssignable( outputClass, method.getReturnType(), true)); if (!isMatching) { throw new ValidationException( String.format( "Could not find an implementation method '%s' in class '%s' for function '%s' that " + "matches the following signature:\n%s", methodName, functionClass.getName(), functionName, ExtractionUtils.createMethodSignatureString( methodName, argumentClasses, outputClass))); } }
3.68
querydsl_JTSCurveExpression_startPoint
/** * The start Point of this Curve. * * @return start point */ public JTSPointExpression<Point> startPoint() { if (startPoint == null) { startPoint = JTSGeometryExpressions.pointOperation(SpatialOps.START_POINT, mixin); } return startPoint; }
3.68
dubbo_TTable_width
/** * visible width for the given string. * * for example: "abc\n1234"'s width is 4. * * @param string the given string * @return visible width */ private static int width(String string) { int maxWidth = 0; try (Scanner scanner = new Scanner(new StringReader(string))) { while (scanner.hasNextLine()) { maxWidth = max(length(scanner.nextLine()), maxWidth); } } return maxWidth; }
3.68
framework_Tree_readItem
/** * Reads an Item from a design and inserts it into the data source. * Recursively handles any children of the item as well. * * @since 7.5.0 * @param node * an element representing the item (tree node). * @param selected * A set accumulating selected items. If the item that is read is * marked as selected, its item id should be added to this set. * @param context * the DesignContext instance used in parsing * @return the item id of the new item * * @throws DesignException * if the tag name of the {@code node} element is not * {@code node}. */ @Override protected String readItem(Element node, Set<String> selected, DesignContext context) { if (!"node".equals(node.tagName())) { throw new DesignException("Unrecognized child element in " + getClass().getSimpleName() + ": " + node.tagName()); } String itemId = node.attr("text"); addItem(itemId); if (node.hasAttr("icon")) { Resource icon = DesignAttributeHandler.readAttribute("icon", node.attributes(), Resource.class); setItemIcon(itemId, icon); } if (node.hasAttr("selected")) { selected.add(itemId); } for (Element child : node.children()) { String childItemId = readItem(child, selected, context); setParent(childItemId, itemId); } return itemId; }
3.68
framework_ConnectorTracker_removeUnregisteredConnector
/** * Removes all references and information about the given connector, which * must not be registered. * * @param connector * @param globalResourceHandler */ private void removeUnregisteredConnector(ClientConnector connector, GlobalResourceHandler globalResourceHandler) { ClientConnector removedConnector = connectorIdToConnector .remove(connector.getConnectorId()); assert removedConnector == connector; if (globalResourceHandler != null) { globalResourceHandler.unregisterConnector(connector); } uninitializedConnectors.remove(connector); diffStates.remove(connector); }
3.68
framework_StringLengthValidator_setMaxLength
/** * Sets the maximum permissible length of the string. * * @param maxLength * the maximum length to accept or null for no limit */ public void setMaxLength(Integer maxLength) { validator.setMaxValue(maxLength); }
3.68
hbase_SegmentScanner_seekToPreviousRow
/** * Seek the scanner to the first Cell of the row which is the previous row of the specified key * @param cell seek value * @return true if the scanner is at the first valid Cell of the previous row, false if no such * Cell exists */ @Override public boolean seekToPreviousRow(Cell cell) throws IOException { if (closed) { return false; } boolean keepSeeking; Cell key = cell; do { Cell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key); SortedSet<Cell> cellHead = segment.headSet(firstKeyOnRow); Cell lastCellBeforeRow = cellHead.isEmpty() ? null : cellHead.last(); if (lastCellBeforeRow == null) { current = null; return false; } Cell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow); this.stopSkippingKVsIfNextRow = true; this.stopSkippingKVsRow = firstKeyOnPreviousRow; seek(firstKeyOnPreviousRow); this.stopSkippingKVsIfNextRow = false; if ( peek() == null || segment.getComparator().compareRows(peek(), firstKeyOnPreviousRow) > 0 ) { keepSeeking = true; key = firstKeyOnPreviousRow; continue; } else { keepSeeking = false; } } while (keepSeeking); return true; }
3.68
hbase_TableHFileArchiveTracker_start
/** * Start monitoring for archive updates * @throws KeeperException on failure to find/create nodes */ public void start() throws KeeperException { // if archiving is enabled, then read in the list of tables to archive LOG.debug("Starting hfile archive tracker..."); this.checkEnabledAndUpdate(); LOG.debug("Finished starting hfile archive tracker!"); }
3.68
hadoop_WordMean_reduce
/** * Sums all the individual values within the iterator and writes them to the * same key. * * @param key * This will be one of 2 constants: LENGTH_STR or COUNT_STR. * @param values * This will be an iterator of all the values associated with that * key. */ public void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException { long theSum = 0; for (LongWritable val : values) { theSum += val.get(); } sum.set(theSum); context.write(key, sum); }
3.68
framework_VDragAndDropWrapper_hookHtml5Events
/** * Prototype code, memory leak risk. * * @param el * * @since 7.2 */ protected void hookHtml5Events(Element el) { hookHtml5Events(DOM.asOld(el)); }
3.68
hmily_JavaBeanBinder_addField
/** * Add field. * * @param field the field */ void addField(final Field field) { if (this.field == null) { this.field = field; } }
3.68
hbase_RSGroupBasedLoadBalancer_filterServers
/** * Filter servers based on the online servers. * <p/> * servers is actually a TreeSet (see {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo}), having * its contains()'s time complexity as O(logn), which is good enough. * <p/> * TODO: consider using HashSet to pursue O(1) for contains() throughout the calling chain if * needed. * @param servers the servers * @param onlineServers List of servers which are online. * @return the list */ private List<ServerName> filterServers(Set<Address> servers, List<ServerName> onlineServers) { ArrayList<ServerName> finalList = new ArrayList<>(); for (ServerName onlineServer : onlineServers) { if (servers.contains(onlineServer.getAddress())) { finalList.add(onlineServer); } } return finalList; }
3.68
hadoop_Retryer_continueRetry
/** * Returns true if retrying should continue, false otherwise. * * @return true if the caller should retry, false otherwise. */ public boolean continueRetry() { if (this.delay >= this.maxDelay) { return false; } try { Thread.sleep(this.perRetryDelay); } catch (InterruptedException e) { // Ignore the exception as required by the semantic of this class; } this.delay += this.perRetryDelay; return true; }
3.68
hadoop_ActiveAuditManagerS3A_activate
/** * Makes this the thread's active span and activate. * If the span was already active: no-op. */ @Override public AuditSpanS3A activate() { if (!isActive()) { switchToActiveSpan(this); span.activate(); } return this; }
3.68
hadoop_Cluster_close
/** * Close the <code>Cluster</code>. * @throws IOException */ public synchronized void close() throws IOException { clientProtocolProvider.close(client); }
3.68
flink_ExecutionJobVertex_getOperatorIDs
/** * Returns a list containing the ID pairs of all operators contained in this execution job * vertex. * * @return list containing the ID pairs of all contained operators */ public List<OperatorIDPair> getOperatorIDs() { return jobVertex.getOperatorIDs(); }
3.68
hbase_RegionPlacementMaintainer_checkDifferencesWithOldPlan
/** * Compares two plans and check whether the locality dropped or increased (prints the information * as a string) also prints the baseline locality * @param movesPerTable - how many primary regions will move per table * @param regionLocalityMap - locality map from FS * @param newPlan - new assignment plan */ public void checkDifferencesWithOldPlan(Map<TableName, Integer> movesPerTable, Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan newPlan) throws IOException { // localities for primary, secondary and tertiary SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan(); Set<TableName> tables = snapshot.getTableSet(); Map<TableName, List<RegionInfo>> tableToRegionsMap = snapshot.getTableToRegionMap(); for (TableName table : tables) { float[] deltaLocality = new float[3]; float[] locality = new float[3]; if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) { continue; } List<RegionInfo> regions = tableToRegionsMap.get(table); System.out.println("=================================================="); System.out.println("Assignment Plan Projection Report For Table: " + table); System.out.println("\t Total regions: " + regions.size()); System.out.println( "\t" + movesPerTable.get(table) + " primaries will move due to their primary has changed"); for (RegionInfo currentRegion : regions) { Map<String, Float> regionLocality = regionLocalityMap.get(currentRegion.getEncodedName()); if (regionLocality == null) { continue; } List<ServerName> oldServers = oldPlan.getFavoredNodes(currentRegion); List<ServerName> newServers = newPlan.getFavoredNodes(currentRegion); if (newServers != null && oldServers != null) { int i = 0; for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { ServerName newServer = newServers.get(p.ordinal()); ServerName oldServer = oldServers.get(p.ordinal()); Float oldLocality = 0f; if (oldServers != null) { oldLocality = regionLocality.get(oldServer.getHostname()); if (oldLocality == null) { oldLocality = 0f; } locality[i] += oldLocality; } Float newLocality = regionLocality.get(newServer.getHostname()); if (newLocality == null) { newLocality = 0f; } deltaLocality[i] += newLocality - oldLocality; i++; } } } DecimalFormat df = new java.text.DecimalFormat("#.##"); for (int i = 0; i < deltaLocality.length; i++) { System.out.print("\t\t Baseline locality for "); if (i == 0) { System.out.print("primary "); } else if (i == 1) { System.out.print("secondary "); } else if (i == 2) { System.out.print("tertiary "); } System.out.println(df.format(100 * locality[i] / regions.size()) + "%"); System.out.print("\t\t Locality will change with the new plan: "); System.out.println(df.format(100 * deltaLocality[i] / regions.size()) + "%"); } System.out.println("\t Baseline dispersion"); printDispersionScores(table, snapshot, regions.size(), null, true); System.out.println("\t Projected dispersion"); printDispersionScores(table, snapshot, regions.size(), newPlan, true); } }
3.68
flink_TypeExtractionUtils_getTypeHierarchy
/** * Traverses the type hierarchy of a type up until a certain stop class is found. * * @param t type for which a hierarchy need to be created * @return type of the immediate child of the stop class */ public static Type getTypeHierarchy(List<Type> typeHierarchy, Type t, Class<?> stopAtClass) { while (!(isClassType(t) && typeToClass(t).equals(stopAtClass))) { typeHierarchy.add(t); t = typeToClass(t).getGenericSuperclass(); if (t == null) { break; } } return t; }
3.68
graphhopper_OSMReaderConfig_setWorkerThreads
/** * Sets the number of threads used for the OSM import */ public OSMReaderConfig setWorkerThreads(int workerThreads) { this.workerThreads = workerThreads; return this; }
3.68
pulsar_AbstractSinkRecord_individualAck
/** * Some sinks sometimes want to control the ack type. */ public void individualAck() { if (sourceRecord instanceof PulsarRecord) { PulsarRecord pulsarRecord = (PulsarRecord) sourceRecord; pulsarRecord.individualAck(); } else { throw new RuntimeException("SourceRecord class type must be PulsarRecord"); } }
3.68
flink_ExecutionConfig_getTaskCancellationInterval
/** * Gets the interval (in milliseconds) between consecutive attempts to cancel a running task. */ public long getTaskCancellationInterval() { return configuration.get(TaskManagerOptions.TASK_CANCELLATION_INTERVAL); }
3.68
pulsar_RangeCache_clear
/** * Remove all the entries from the cache. * * @return size of removed entries */ public synchronized Pair<Integer, Long> clear() { long removedSize = 0; int removedCount = 0; while (true) { Map.Entry<Key, Value> entry = entries.pollFirstEntry(); if (entry == null) { break; } Value value = entry.getValue(); removedSize += weighter.getSize(value); removedCount++; value.release(); } size.getAndAdd(-removedSize); return Pair.of(removedCount, removedSize); }
3.68
AreaShop_GeneralRegion_getLowerCaseName
/** * Get the lowercase region name. * @return The region name in lowercase */ public String getLowerCaseName() { return getName().toLowerCase(); }
3.68
pulsar_BlobStoreManagedLedgerOffloader_offload
/** * Upload the DataBlocks associated with the given ReadHandle using MultiPartUpload, * Creating indexBlocks for each corresponding DataBlock that is uploaded. */ @Override public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid, Map<String, String> extraMetadata) { final String managedLedgerName = extraMetadata.get(MANAGED_LEDGER_NAME); final String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName); CompletableFuture<Void> promise = new CompletableFuture<>(); scheduler.chooseThread(readHandle.getId()).execute(() -> { final BlobStore writeBlobStore = getBlobStore(config.getBlobStoreLocation()); log.info("offload {} uuid {} extraMetadata {} to {} {}", readHandle.getId(), uuid, extraMetadata, config.getBlobStoreLocation(), writeBlobStore); if (readHandle.getLength() == 0 || !readHandle.isClosed() || readHandle.getLastAddConfirmed() < 0) { promise.completeExceptionally( new IllegalArgumentException("An empty or open ledger should never be offloaded")); return; } OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create() .withLedgerMetadata(readHandle.getLedgerMetadata()) .withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize()); String dataBlockKey = DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid); String indexBlockKey = DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid); log.info("ledger {} dataBlockKey {} indexBlockKey {}", readHandle.getId(), dataBlockKey, indexBlockKey); MultipartUpload mpu = null; List<MultipartPart> parts = Lists.newArrayList(); // init multi part upload for data block. try { BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey); Map<String, String> objectMetadata = new HashMap<>(userMetadata); objectMetadata.put("role", "data"); if (extraMetadata != null) { objectMetadata.putAll(extraMetadata); } DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata); Blob blob = blobBuilder.build(); log.info("initiateMultipartUpload bucket {}, metadata {} ", config.getBucket(), blob.getMetadata()); mpu = writeBlobStore.initiateMultipartUpload(config.getBucket(), blob.getMetadata(), new PutOptions()); } catch (Throwable t) { promise.completeExceptionally(t); return; } long dataObjectLength = 0; // start multi part upload for data block. try { long startEntry = 0; int partId = 1; long start = System.nanoTime(); long entryBytesWritten = 0; while (startEntry <= readHandle.getLastAddConfirmed()) { int blockSize = BlockAwareSegmentInputStreamImpl .calculateBlockSize(config.getMaxBlockSizeInBytes(), readHandle, startEntry, entryBytesWritten); try (BlockAwareSegmentInputStream blockStream = new BlockAwareSegmentInputStreamImpl( readHandle, startEntry, blockSize, this.offloaderStats, managedLedgerName)) { Payload partPayload = Payloads.newInputStreamPayload(blockStream); partPayload.getContentMetadata().setContentLength((long) blockSize); partPayload.getContentMetadata().setContentType("application/octet-stream"); parts.add(writeBlobStore.uploadMultipartPart(mpu, partId, partPayload)); log.debug("UploadMultipartPart. container: {}, blobName: {}, partId: {}, mpu: {}", config.getBucket(), dataBlockKey, partId, mpu.id()); indexBuilder.addBlock(startEntry, partId, blockSize); if (blockStream.getEndEntryId() != -1) { startEntry = blockStream.getEndEntryId() + 1; } else { // could not read entry from ledger. break; } entryBytesWritten += blockStream.getBlockEntryBytesCount(); partId++; this.offloaderStats.recordOffloadBytes(topicName, blockStream.getBlockEntryBytesCount()); } dataObjectLength += blockSize; } String etag = writeBlobStore.completeMultipartUpload(mpu, parts); log.info("Ledger {}, upload finished, etag {}", readHandle.getId(), etag); mpu = null; } catch (Throwable t) { try { if (mpu != null) { writeBlobStore.abortMultipartUpload(mpu); } } catch (Throwable throwable) { log.error("Failed abortMultipartUpload in bucket - {} with key - {}, uploadId - {}.", config.getBucket(), dataBlockKey, mpu.id(), throwable); } this.offloaderStats.recordWriteToStorageError(topicName); this.offloaderStats.recordOffloadError(topicName); promise.completeExceptionally(t); return; } // upload index block try (OffloadIndexBlock index = indexBuilder.withDataObjectLength(dataObjectLength).build(); IndexInputStream indexStream = index.toStream()) { // write the index block BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey); Map<String, String> objectMetadata = new HashMap<>(userMetadata); objectMetadata.put("role", "index"); if (extraMetadata != null) { objectMetadata.putAll(extraMetadata); } DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata); Payload indexPayload = Payloads.newInputStreamPayload(indexStream); indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize()); indexPayload.getContentMetadata().setContentType("application/octet-stream"); Blob blob = blobBuilder .payload(indexPayload) .contentLength((long) indexStream.getStreamSize()) .build(); writeBlobStore.putBlob(config.getBucket(), blob); promise.complete(null); } catch (Throwable t) { try { writeBlobStore.removeBlob(config.getBucket(), dataBlockKey); } catch (Throwable throwable) { log.error("Failed deleteObject in bucket - {} with key - {}.", config.getBucket(), dataBlockKey, throwable); } this.offloaderStats.recordWriteToStorageError(topicName); this.offloaderStats.recordOffloadError(topicName); promise.completeExceptionally(t); return; } }); return promise; }
3.68
hudi_HoodieLogFileReader_hasNext
/* * hasNext is not idempotent. TODO - Fix this. It is okay for now - PR */ @Override public boolean hasNext() { try { return readMagic(); } catch (IOException e) { throw new HoodieIOException("IOException when reading logfile " + logFile, e); } }
3.68
framework_AbstractComponent_setHeightUndefined
/* * (non-Javadoc) * * @see com.vaadin.server.Sizeable#setHeightUndefined() */ @Override public void setHeightUndefined() { setHeight(-1, Unit.PIXELS); }
3.68
hudi_RunLengthDecoder_initWidthAndPacker
/** * Initializes the internal state for decoding ints of `bitWidth`. */ private void initWidthAndPacker(int bitWidth) { Preconditions.checkArgument(bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32"); this.bitWidth = bitWidth; this.bytesWidth = BytesUtils.paddedByteCountFromBits(bitWidth); this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth); }
3.68
hadoop_StageConfig_enterStage
/** * Enter the stage; calls back to * {@link #enterStageEventHandler} if non-null. * @param stage stage entered */ public void enterStage(String stage) { if (enterStageEventHandler != null) { enterStageEventHandler.enterStage(stage); } }
3.68
hmily_RpcMediator_getAndSet
/** * Gets and set. * * @param function the function * @param biConsumer the bi consumer */ public void getAndSet(final Function<String, Object> function, final BiConsumer<String, Object> biConsumer) { Object result = function.apply(CommonConstant.HMILY_TRANSACTION_CONTEXT); if (Objects.nonNull(result)) { biConsumer.accept(CommonConstant.HMILY_TRANSACTION_CONTEXT, result); } }
3.68
hbase_ReplicationSourceManager_removePeer
/** * <ol> * <li>Remove peer for replicationPeers</li> * <li>Remove all the recovered sources for the specified id and related replication queues</li> * <li>Remove the normal source and related replication queue</li> * <li>Remove HFile Refs</li> * </ol> * @param peerId the id of the replication peer */ public void removePeer(String peerId) { ReplicationPeer peer = replicationPeers.removePeer(peerId); String terminateMessage = "Replication stream was removed by a user"; List<ReplicationSourceInterface> oldSourcesToDelete = new ArrayList<>(); // synchronized on oldsources to avoid adding recovered source for the to-be-removed peer // see NodeFailoverWorker.run synchronized (this.oldsources) { // First close all the recovered sources for this peer for (ReplicationSourceInterface src : oldsources) { if (peerId.equals(src.getPeerId())) { oldSourcesToDelete.add(src); } } for (ReplicationSourceInterface src : oldSourcesToDelete) { src.terminate(terminateMessage); removeRecoveredSource(src); } } LOG.info("Number of deleted recovered sources for {}: {}", peerId, oldSourcesToDelete.size()); // Now close the normal source for this peer ReplicationSourceInterface srcToRemove = this.sources.get(peerId); if (srcToRemove != null) { srcToRemove.terminate(terminateMessage); removeSource(srcToRemove); } ReplicationPeerConfig peerConfig = peer.getPeerConfig(); if (peerConfig.isSyncReplication()) { syncReplicationPeerMappingManager.remove(peerId, peerConfig); } }
3.68
hbase_Client_getCluster
/** Returns the cluster definition */ public Cluster getCluster() { return cluster; }
3.68
dubbo_MockInvoker_normalizeMock
/** * Normalize mock string: * * <ol> * <li>return => return null</li> * <li>fail => default</li> * <li>force => default</li> * <li>fail:throw/return foo => throw/return foo</li> * <li>force:throw/return foo => throw/return foo</li> * </ol> * * @param mock mock string * @return normalized mock string */ public static String normalizeMock(String mock) { if (mock == null) { return mock; } mock = mock.trim(); if (mock.length() == 0) { return mock; } if (RETURN_KEY.equalsIgnoreCase(mock)) { return RETURN_PREFIX + "null"; } if (ConfigUtils.isDefault(mock) || "fail".equalsIgnoreCase(mock) || "force".equalsIgnoreCase(mock)) { return "default"; } if (mock.startsWith(FAIL_PREFIX)) { mock = mock.substring(FAIL_PREFIX.length()).trim(); } if (mock.startsWith(FORCE_PREFIX)) { mock = mock.substring(FORCE_PREFIX.length()).trim(); } if (mock.startsWith(RETURN_PREFIX) || mock.startsWith(THROW_PREFIX)) { mock = mock.replace('`', '"'); } return mock; }
3.68