Columns: name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, range 3.26-3.68)
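Rows follow as (name, code_snippet, score) triples. If this export comes from a Hugging Face dataset, it can be loaded with the `datasets` library; the sketch below assumes that origin, and the dataset ID "org/java-method-quality" is a hypothetical placeholder rather than the real repository path.

from datasets import load_dataset

# Hypothetical dataset ID -- replace with the actual repository path.
ds = load_dataset("org/java-method-quality", split="train")

# Each row mirrors the triples below: method name, raw Java source, and a quality score.
row = ds[0]
print(row["name"])          # e.g. "hudi_SqlQueryBuilder_limit"
print(row["score"])         # float64, observed range 3.26-3.68
print(row["code_snippet"])  # the full Java method, Javadoc included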
hudi_SqlQueryBuilder_limit
/** * Appends a "limit" clause to a query. * * @param count The limit count. * @return The {@link SqlQueryBuilder} instance. */ public SqlQueryBuilder limit(long count) { if (count < 0) { throw new IllegalArgumentException("Please provide a positive integer for the LIMIT clause."); } sqlBuilder.append(" limit "); sqlBuilder.append(count); return this; }
3.68
hmily_AggregateBinder_bind
/** * Bind object. * * @param name the name * @param target the target * @param elementBinder the element binder * @return the object */ @SuppressWarnings("unchecked") public Object bind(final PropertyName name, final BindData<?> target, final AggregateElementBinder elementBinder) { Object result = bind(name, target, getEnv(), elementBinder); Supplier<?> targetValue = target.getValue(); if (result == null || targetValue == null) { return result; } return merge(targetValue, (T) result); }
3.68
hbase_StoreFileInfo_getReferredToFile
/*
 * Return path to the file referred to by a Reference. Presumes a directory hierarchy of
 * <code>${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname</code>.
 * @param p Path to a Reference file.
 * @return Calculated path to parent region file.
 * @throws IllegalArgumentException when path regex fails to match.
 */
public static Path getReferredToFile(final Path p) {
  Matcher m = REF_NAME_PATTERN.matcher(p.getName());
  if (m == null || !m.matches()) {
    LOG.warn("Failed match of store file name {}", p.toString());
    throw new IllegalArgumentException("Failed match of store file name " + p.toString());
  }
  // Other region name is suffix on the passed Reference file name
  String otherRegion = m.group(2);
  // Tabledir is up two directories from where Reference was written.
  Path tableDir = p.getParent().getParent().getParent();
  String nameStrippedOfSuffix = m.group(1);
  LOG.trace("reference {} to region={} hfile={}", p, otherRegion, nameStrippedOfSuffix);
  // Build up new path with the referenced region in place of our current
  // region in the reference path. Also strip regionname suffix from name.
  return new Path(new Path(new Path(tableDir, otherRegion), p.getParent().getName()),
    nameStrippedOfSuffix);
}
3.68
morf_SqlDialect_appendJoin
/**
 * @param result the string builder to append to
 * @param join the join statement
 * @param innerJoinKeyword usually an INNER JOIN, but this can be changed for optimisations
 */
protected void appendJoin(StringBuilder result, Join join, String innerJoinKeyword) {
  // Put the type in
  switch (join.getType()) {
    case INNER_JOIN:
      result.append(" ").append(innerJoinKeyword).append(" ");
      break;
    case LEFT_OUTER_JOIN:
      result.append(" LEFT OUTER JOIN ");
      break;
    case FULL_OUTER_JOIN:
      result.append(" FULL OUTER JOIN ");
      break;
    default:
      throw new UnsupportedOperationException("Cannot perform join of type [" + join.getType() + "] on database");
  }

  if (join.getTable() == null && (join.getSubSelect() == null || join.getSubSelect().getAlias() == null)) {
    throw new IllegalArgumentException("Join clause does not specify table or sub-select with an alias");
  }

  if (join.getTable() == null) {
    result.append('(');
    result.append(getSqlFrom(join.getSubSelect()));
    result.append(") ");
    result.append(join.getSubSelect().getAlias());
  } else {
    // Now add the table name
    result.append(tableNameWithSchemaName(join.getTable()));

    // And add an alias if necessary
    if (!join.getTable().getAlias().isEmpty()) {
      result.append(" ").append(join.getTable().getAlias());
    }
  }

  if (join.getCriterion() != null) {
    result.append(" ON ");

    // Then put the join fields into the output
    result.append(getSqlFrom(join.getCriterion()));

  } else if (join.getType() == JoinType.LEFT_OUTER_JOIN || join.getType() == JoinType.FULL_OUTER_JOIN) {
    throw new IllegalArgumentException(join.getType() + " must have ON criteria");

  } else {
    // MySql supports no ON criteria and ON TRUE, but the other platforms
    // don't, so just keep things simple.
    result.append(String.format(" ON 1=1"));
  }
}
3.68
hbase_CheckAndMutate_newBuilder
/** * returns a builder object to build a CheckAndMutate object * @param row row * @return a builder object */ public static Builder newBuilder(byte[] row) { return new Builder(row); }
3.68
hadoop_BaseRecord_isExpired
/** * Check if this record is expired. The default is false. Override for * customized behavior. * * @return True if the record is expired. */ public boolean isExpired() { return false; }
3.68
hmily_TableMetaData_getColumnMetaData
/** * Get column meta data. * * @param columnIndex column index * @return column meta data */ public ColumnMetaData getColumnMetaData(final int columnIndex) { return columns.get(columnNames.get(columnIndex)); }
3.68
hadoop_ReencryptionHandler_startUpdaterThread
/** * Start the re-encryption updater thread. */ void startUpdaterThread() { updaterExecutor = Executors.newSingleThreadExecutor( new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("reencryptionUpdaterThread #%d").build()); updaterExecutor.execute(reencryptionUpdater); }
3.68
framework_VLoadingIndicator_getThirdDelay
/** * Returns the delay (in ms) which must pass before the loading indicator * moves to its "third" state. * * @return The delay (in ms) until the loading indicator moves into its * "third" state. Counted from when {@link #trigger()} is called. */ public int getThirdDelay() { return thirdDelay; }
3.68
hbase_MobUtils_hasMobColumns
/** * Checks whether this table has mob-enabled columns. * @param htd The current table descriptor. * @return Whether this table has mob-enabled columns. */ public static boolean hasMobColumns(TableDescriptor htd) { ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies(); for (ColumnFamilyDescriptor hcd : hcds) { if (hcd.isMobEnabled()) { return true; } } return false; }
3.68
hbase_HBaseReplicationEndpoint_reconnect
/** * A private method used to re-establish a zookeeper session with a peer cluster. */ private void reconnect(KeeperException ke) { if ( ke instanceof ConnectionLossException || ke instanceof SessionExpiredException || ke instanceof AuthFailedException ) { String clusterKey = ctx.getPeerConfig().getClusterKey(); LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke); try { reloadZkWatcher(); } catch (IOException io) { LOG.warn("Creation of ZookeeperWatcher failed for peer {}", clusterKey, io); } } }
3.68
hadoop_CSQueueStore_getQueues
/** * Returns all queues as a list. * @return List containing all the queues */ public Collection<CSQueue> getQueues() { try { modificationLock.readLock().lock(); return ImmutableList.copyOf(fullNameQueues.values()); } finally { modificationLock.readLock().unlock(); } }
3.68
hadoop_FileIoProvider_getHardLinkCount
/** * Retrieves the number of links to the specified file. * * @param volume target volume. null if unavailable. * @param f file whose link count is being queried. * @return number of hard-links to the given file, including the * given path itself. * @throws IOException */ public int getHardLinkCount( @Nullable FsVolumeSpi volume, File f) throws IOException { final long begin = profilingEventHook.beforeMetadataOp(volume, LIST); try { faultInjectorEventHook.beforeMetadataOp(volume, LIST); int count = HardLink.getLinkCount(f); profilingEventHook.afterMetadataOp(volume, LIST, begin); return count; } catch(Exception e) { onFailure(volume, begin); throw e; } }
3.68
flink_SinkTestSuiteBase_testMetrics
/**
 * Test connector sink metrics.
 *
 * <p>This test will create a sink in the external system, generate test data and write them to
 * the sink via a Flink job. Then read and compare the metrics.
 *
 * <p>Now test: numRecordsOut
 */
@TestTemplate
@DisplayName("Test sink metrics")
public void testMetrics(
        TestEnvironment testEnv,
        DataStreamSinkExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    TestingSinkSettings sinkSettings = getTestingSinkSettings(semantic);
    int parallelism = 1;
    final List<T> testRecords = generateTestData(sinkSettings, externalContext);

    // make sure use different names when executes multi times
    String sinkName = "metricTestSink" + testRecords.hashCode();
    final StreamExecutionEnvironment env =
            testEnv.createExecutionEnvironment(
                    TestEnvironmentSettings.builder()
                            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                            .build());
    env.enableCheckpointing(50);

    DataStreamSource<T> source =
            env.fromSource(
                            new FromElementsSource<>(
                                    Boundedness.CONTINUOUS_UNBOUNDED,
                                    testRecords,
                                    testRecords.size()),
                            WatermarkStrategy.noWatermarks(),
                            "metricTestSource")
                    .setParallelism(1);
    DataStream<T> dataStream = source.returns(externalContext.getProducedType());
    tryCreateSink(dataStream, externalContext, sinkSettings)
            .name(sinkName)
            .setParallelism(parallelism);
    final JobClient jobClient = env.executeAsync("Metrics Test");
    final MetricQuerier queryRestClient = new MetricQuerier(new Configuration());
    final ExecutorService executorService = Executors.newCachedThreadPool();
    try {
        waitForAllTaskRunning(
                () ->
                        getJobDetails(
                                new RestClient(new Configuration(), executorService),
                                testEnv.getRestEndpoint(),
                                jobClient.getJobID()));

        waitUntilCondition(
                () -> {
                    // test metrics
                    try {
                        return compareSinkMetrics(
                                queryRestClient,
                                testEnv,
                                externalContext,
                                jobClient.getJobID(),
                                sinkName,
                                MetricNames.NUM_RECORDS_SEND,
                                testRecords.size());
                    } catch (Exception e) {
                        // skip failed assert try
                        return false;
                    }
                });
    } finally {
        // Clean up
        executorService.shutdown();
        killJob(jobClient);
    }
}
3.68
flink_Tuple16_copy
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> copy() { return new Tuple16<>( this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15); }
3.68
flink_OptionalUtils_firstPresent
/** Returns the first {@link Optional} which is present. */ @SafeVarargs public static <T> Optional<T> firstPresent(Optional<T>... opts) { for (Optional<T> opt : opts) { if (opt.isPresent()) { return opt; } } return Optional.empty(); }
3.68
framework_Slot_onDetach
/* * (non-Javadoc) * * @see com.google.gwt.user.client.ui.Widget#onDetach() */ @Override protected void onDetach() { if (spacer != null) { spacer.removeFromParent(); } super.onDetach(); }
3.68
hadoop_EncryptionSecrets_init
/** * Init all state, including after any read. * @throws IOException error rebuilding state. */ private void init() throws IOException { encryptionMethod = S3AEncryptionMethods.getMethod( encryptionAlgorithm); }
3.68
framework_DesignFormatter_encodeForTextNode
/** * <p> * Encodes <em>some</em> special characters in a given input String to make * it ready to be written as contents of a text node. WARNING: this will * e.g. encode "&lt;someTag&gt;" to "&amp;lt;someTag&amp;gt;" as this method * doesn't do any parsing and assumes that there are no intended HTML * elements in the input. Only some entities are actually encoded: * &amp;,&lt;, &gt; It's assumed that other entities are taken care of by * Jsoup. * </p> * <p> * Typically, this method will be used by components to encode data (like * option items in {@code AbstractSelect}) when dumping to HTML format * </p> * * @since 7.5.7 * @param input * String to be encoded * @return String with &amp;,&lt; and &gt; replaced with their HTML entities */ public static String encodeForTextNode(String input) { if (input == null) { return null; } return input.replace("&", "&amp;").replace(">", "&gt;").replace("<", "&lt;"); }
3.68
pulsar_NoStrictCacheSizeAllocator_release
/** * This method used to release used cache size and add available cache size. * in normal case, the available size shouldn't exceed max cache size. * * @param size release size */ public void release(long size) { lock.lock(); try { availableCacheSize.add(size); if (availableCacheSize.longValue() > maxCacheSize) { availableCacheSize.reset(); availableCacheSize.add(maxCacheSize); } } finally { lock.unlock(); } }
3.68
flink_SuperstepBarrier_onEvent
/** Barrier will release the waiting thread if an event occurs. */ @Override public void onEvent(TaskEvent event) { if (event instanceof TerminationEvent) { terminationSignaled = true; } else if (event instanceof AllWorkersDoneEvent) { AllWorkersDoneEvent wde = (AllWorkersDoneEvent) event; aggregatorNames = wde.getAggregatorNames(); aggregates = wde.getAggregates(userCodeClassLoader); } else { throw new IllegalArgumentException("Unknown event type."); } latch.countDown(); }
3.68
flink_MemorySegment_putChar
/**
 * Writes a char value to the given position, in the system's native byte order.
 *
 * @param index The position at which the memory will be written.
 * @param value The char value to be written.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 2.
 */
@SuppressWarnings("restriction")
public void putChar(int index, char value) {
    final long pos = address + index;
    if (index >= 0 && pos <= addressLimit - 2) {
        UNSAFE.putChar(heapMemory, pos, value);
    } else if (address > addressLimit) {
        throw new IllegalStateException("segment has been freed");
    } else {
        // index is in fact invalid
        throw new IndexOutOfBoundsException();
    }
}
3.68
flink_ZooKeeperStateHandleStore_hasLock
/**
 * Checks whether a lock is created for this instance on the passed ZooKeeper node.
 *
 * @param rootPath The node that shall be checked.
 * @return {@code true} if the lock exists; {@code false} otherwise.
 */
private boolean hasLock(String rootPath) throws Exception {
    final String normalizedRootPath = normalizePath(rootPath);
    try {
        return client.checkExists().forPath(getInstanceLockPath(normalizedRootPath)) != null;
    } catch (KeeperException.NoNodeException e) {
        // this is the case if the node is marked for deletion or already deleted
        return false;
    }
}
3.68
hbase_UnassignProcedure_remoteCallFailed
/** * Returns If true, we will re-wake up this procedure; if false, the procedure stays suspended. */ @Override protected boolean remoteCallFailed(final MasterProcedureEnv env, final RegionStateNode regionNode, final IOException exception) { return true; }
3.68
flink_FlinkContainersSettings_defaultConfig
/** * {@code FlinkContainersConfig} based on defaults. * * @return The Flink containers config. */ public static FlinkContainersSettings defaultConfig() { return builder().build(); }
3.68
framework_LocatorUtil_isNotificationElement
/** * Checks if path refers to vaadin Notification element * com.vaadin.ui.Notification. * * @param path * to vaadin element * @return true if path refers to Notification element, false otherwise */ public static boolean isNotificationElement(String path) { String regex = "^\\/{0,2}(com\\.vaadin\\.ui\\.)?V?Notification[\\/\\[]?"; RegExp regexp = RegExp.compile(regex); return regexp.test(path); }
3.68
morf_OracleDialect_makeStringLiteral
/** * Turn a string value into an SQL string literal which has that value. * <p> * We use {@linkplain StringUtils#isEmpty(CharSequence)} because we want to * differentiate between a single space and an empty string. * </p> * <p> * This is necessary because char types cannot be null and must contain * a single space. * <p> * * @param literalValue the literal value of the string. * @return SQL String Literal */ @Override protected String makeStringLiteral(String literalValue) { if (StringUtils.isEmpty(literalValue)) { return "NULL"; } return String.format("N'%s'", super.escapeSql(literalValue)); }
3.68
hbase_ProcedureCoordinator_memberAcquiredBarrier
/** * Notification that the procedure had the specified member acquired its part of the barrier via * {@link Subprocedure#acquireBarrier()}. * @param procName name of the procedure that acquired * @param member name of the member that acquired */ void memberAcquiredBarrier(String procName, final String member) { Procedure proc = procedures.get(procName); if (proc == null) { LOG.warn( "Member '" + member + "' is trying to acquire an unknown procedure '" + procName + "'"); return; } if (LOG.isTraceEnabled()) { LOG.trace("Member '" + member + "' acquired procedure '" + procName + "'"); } proc.barrierAcquiredByMember(member); }
3.68
graphhopper_VectorTile_getValuesList
/** * <pre> * Dictionary encoding for values * </pre> * * <code>repeated .vector_tile.Tile.Value values = 4;</code> */ public java.util.List<vector_tile.VectorTile.Tile.Value> getValuesList() { if (valuesBuilder_ == null) { return java.util.Collections.unmodifiableList(values_); } else { return valuesBuilder_.getMessageList(); } }
3.68
hadoop_AbstractOperationAuditor_getOptions
/** * Get the options this auditor was initialized with. * @return options. */ protected OperationAuditorOptions getOptions() { return options; }
3.68
framework_VScrollTable_restoreRowVisibility
/** * Restore row visibility which is set to "none" when the row is * rendered (due a performance optimization). */ private void restoreRowVisibility() { for (Widget row : renderedRows) { row.getElement().getStyle().setProperty("visibility", ""); } }
3.68
morf_SpreadsheetDataSetConsumer_createTitle
/** * Inserts a row at the top of the sheet with the given title * @param sheet add the title to * @param title to add */ protected void createTitle(WritableSheet sheet, String title) { try { Label cell = new Label(0, 0, title); WritableFont headingFont = new WritableFont(WritableFont.ARIAL, 16, WritableFont.BOLD); WritableCellFormat headingFormat = new WritableCellFormat(headingFont); cell.setCellFormat(headingFormat); sheet.addCell(cell); cell = new Label(12, 0, "Copyright " + new SimpleDateFormat("yyyy").format(new Date()) + " Alfa Financial Software Ltd."); WritableCellFormat copyrightFormat = new WritableCellFormat(); copyrightFormat.setAlignment(Alignment.RIGHT); cell.setCellFormat(copyrightFormat); sheet.addCell(cell); } catch (Exception e) { throw new RuntimeException(e); } }
3.68
framework_AbstractContainer_setPropertySetChangeListeners
/** * Sets the property set change listener collection. For internal use only. * * @param propertySetChangeListeners */ protected void setPropertySetChangeListeners( Collection<Container.PropertySetChangeListener> propertySetChangeListeners) { this.propertySetChangeListeners = propertySetChangeListeners; }
3.68
pulsar_PulsarMockLedgerHandle_readAsync
// ReadHandle interface
@Override
public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry) {
    return readHandle.readAsync(firstEntry, lastEntry);
}
3.68
dubbo_TypeDefinitionBuilder_build
/**
 * Build the instance of {@link TypeDefinition} from the specified {@link TypeMirror type}
 *
 * @param processingEnv {@link ProcessingEnvironment}
 * @param type {@link TypeMirror type}
 * @return non-null
 */
static TypeDefinition build(
        ProcessingEnvironment processingEnv, TypeMirror type, Map<String, TypeDefinition> typeCache) {
    // Build by all instances of TypeDefinitionBuilder that were loaded By Java SPI
    TypeDefinition typeDefinition = ApplicationModel.defaultModel()
            .getExtensionLoader(TypeBuilder.class)
            .getSupportedExtensionInstances()
            .stream()
            // load(TypeDefinitionBuilder.class, TypeDefinitionBuilder.class.getClassLoader())
            .filter(builder -> builder.accept(processingEnv, type))
            .findFirst()
            .map(builder -> {
                return builder.build(processingEnv, type, typeCache);
                // typeDefinition.setTypeBuilderName(builder.getClass().getName());
            })
            .orElse(null);
    if (typeDefinition != null) {
        typeCache.put(typeDefinition.getType(), typeDefinition);
    }
    return typeDefinition;
}
3.68
morf_ViewChanges_visit
/**
 * Visit the selected node for the topological sort. If it has been marked start working
 * back up the list. Otherwise, mark it and then try visiting all of its dependent nodes.
 *
 * @param node the node being visited.
 * @param sortedList the list of sorted results. Items in this list are 'permanently' marked e.g. node is sorted.
 * @param temporarilyMarkedRecords a set of nodes we've already visited. Items in this list are 'temporarily' marked e.g. node is visited.
 */
private void visit(String node, Set<String> temporarilyMarkedRecords, List<String> sortedList, Map<String, View> viewIndex) {
  if (log.isDebugEnabled()) {
    log.debug("Visiting node: " + node);
  }

  // Check if we have hit a temporary mark. We should not have done this as we cannot sort collections which contain circular dependencies.
  // we can only sort trees and Directed Acyclic Graphs.
  if (temporarilyMarkedRecords.contains(node)) {
    throw new IllegalStateException("Views requested have a circular dependency.");
  }

  // If the node isn't marked at all. Mark it
  if (sortedList.contains(node)) {
    return;
  }

  temporarilyMarkedRecords.add(node);

  for (String dependentView : viewIndex.get(node).getDependencies()) {
    visit(dependentView, temporarilyMarkedRecords, sortedList, viewIndex);
    if (dropSet.contains(dependentView)) {
      if (log.isDebugEnabled()) log.debug("Expanding views to drop to include " + node + " because it depends on " + dependentView);
      dropNode(node);
    }
  }

  sortedList.add(node); // Permanently mark the node as sorted
  temporarilyMarkedRecords.remove(node); // remove temporary mark
}
3.68
hbase_Mutation_setClusterIds
/** * Marks that the clusters with the given clusterIds have consumed the mutation * @param clusterIds of the clusters that have consumed the mutation */ public Mutation setClusterIds(List<UUID> clusterIds) { ByteArrayDataOutput out = ByteStreams.newDataOutput(); out.writeInt(clusterIds.size()); for (UUID clusterId : clusterIds) { out.writeLong(clusterId.getMostSignificantBits()); out.writeLong(clusterId.getLeastSignificantBits()); } setAttribute(CONSUMED_CLUSTER_IDS, out.toByteArray()); return this; }
3.68
pulsar_FixedColumnLengthTableMaker_make
/**
 * Make a table using the specified settings.
 *
 * @param rows Rows to construct the table from.
 * @return A String version of the table.
 */
public String make(final Object[][] rows) {
    final StringBuilder builder = new StringBuilder();
    int numColumns = 0;
    for (final Object[] row : rows) {
        // Take the largest number of columns out of any row to be the total.
        numColumns = Math.max(numColumns, row.length);
    }
    // Total length of the table in characters.
    int totalLength = numColumns * (leftPadding + rightPadding + separator.length())
            - separator.length() + leftBorder.length() + rightBorder.length();
    for (int i = 0; i < numColumns; ++i) {
        totalLength += lengthFor(i);
    }
    addHorizontalBorder(totalLength, builder, topBorder);
    builder.append('\n');
    int i;
    for (final Object[] row : rows) {
        i = 0;
        builder.append(leftBorder);
        for (final Object element : row) {
            addSpace(leftPadding, builder);
            String elementString;
            if ((element instanceof Float || element instanceof Double) && decimalFormatter != null) {
                elementString = String.format(decimalFormatter, element);
            } else {
                // Avoid throwing NPE
                elementString = Objects.toString(element, "");
            }
            if (elementString.length() > lengthFor(i)) {
                // Trim down to the maximum number of characters.
                elementString = elementString.substring(0, lengthFor(i));
            }
            builder.append(elementString);
            // Add the space due to remaining characters and the right padding.
            addSpace(lengthFor(i) - elementString.length() + rightPadding, builder);
            if (i != numColumns - 1) {
                // Don't add separator for the last column.
                builder.append(separator);
            }
            i += 1;
        }
        // Put empty elements for remaining columns.
        for (; i < numColumns; ++i) {
            addSpace(leftPadding + rightPadding + lengthFor(i), builder);
            if (i != numColumns - 1) {
                builder.append(separator);
            }
        }
        builder.append(rightBorder);
        builder.append('\n');
    }
    addHorizontalBorder(totalLength, builder, bottomBorder);
    return builder.toString();
}
3.68
hudi_HoodieMetaSyncOperations_getMetastoreFieldSchemas
/** * Get the list of field schemas from metastore. */ default List<FieldSchema> getMetastoreFieldSchemas(String tableName) { return Collections.emptyList(); }
3.68
morf_AbstractSqlDialectTest_testNullMetadataError
/**
 * Tests that passing a null value for the metadata fails.
 */
@Test
public void testNullMetadataError() {
  InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE));
  try {
    testDialect.convertStatementToSQL(stmt, null, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
    fail("Should have raised an exception when null metadata was supplied");
  } catch (IllegalArgumentException e) {
    // Expected exception
  }
}
3.68
pulsar_ConcurrentLongPairSet_remove
/** * Remove an existing entry if found. * * @param item1 * @return true if removed or false if item was not present */ public boolean remove(long item1, long item2) { checkBiggerEqualZero(item1); long h = hash(item1, item2); return getSection(h).remove(item1, item2, (int) h); }
3.68
morf_RenameTable_columns
/** * @see org.alfasoftware.morf.metadata.Table#columns() */ @Override public List<Column> columns() { return baseTable.columns(); }
3.68
hadoop_AzureBlobFileSystem_getOwnerUser
/** * Get the username of the FS. * @return the short name of the user who instantiated the FS */ public String getOwnerUser() { return abfsStore.getUser(); }
3.68
pulsar_PersistentReplicator_getAvailablePermits
/**
 * Calculate available permits for read entries.
 *
 * @return
 *   0: Producer queue is full, no permits.
 *  -1: Rate Limiter reaches limit.
 *  >0: available permits for read entries.
 */
private int getAvailablePermits() {
    int availablePermits = producerQueueSize - PENDING_MESSAGES_UPDATER.get(this);

    // return 0, if Producer queue is full, it will pause read entries.
    if (availablePermits <= 0) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Producer queue is full, availablePermits: {}, pause reading",
                    replicatorId, availablePermits);
        }
        return 0;
    }

    // handle rate limit
    if (dispatchRateLimiter.isPresent() && dispatchRateLimiter.get().isDispatchRateLimitingEnabled()) {
        DispatchRateLimiter rateLimiter = dispatchRateLimiter.get();
        // no permits from rate limit
        if (!rateLimiter.hasMessageDispatchPermit()) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] message-read exceeded topic replicator message-rate {}/{},"
                                + " schedule after a {}",
                        replicatorId, rateLimiter.getDispatchRateOnMsg(),
                        rateLimiter.getDispatchRateOnByte(), MESSAGE_RATE_BACKOFF_MS);
            }
            return -1;
        }

        // if dispatch-rate is in msg then read only msg according to available permit
        long availablePermitsOnMsg = rateLimiter.getAvailableDispatchRateLimitOnMsg();
        if (availablePermitsOnMsg > 0) {
            availablePermits = Math.min(availablePermits, (int) availablePermitsOnMsg);
        }
    }

    return availablePermits;
}
3.68
hbase_RemoteProcedureDispatcher_submitTask
// ============================================================================================
//  Task Helpers
// ============================================================================================
protected final void submitTask(Runnable task) {
    threadPool.execute(task);
}
3.68
framework_VCalendarPanel_setRangeEnd
/**
 * Sets the end range for this component. The end range is inclusive, and it
 * depends on the current resolution, what is considered inside the range.
 *
 * @param newRangeEnd
 *            - the allowed range's end date
 */
public void setRangeEnd(Date newRangeEnd) {
    if (!SharedUtil.equals(rangeEnd, newRangeEnd)) {
        rangeEnd = newRangeEnd;
        if (initialRenderDone) {
            // Dynamic updates to the range needs to render the calendar to
            // update the element stylenames
            renderCalendar();
        }
    }
}
3.68
hbase_ReplicationSourceLogQueue_remove
/**
 * Remove head from the queue corresponding to given walGroupId.
 * @param walGroupId walGroupId
 */
public void remove(String walGroupId) {
  PriorityBlockingQueue<Path> queue = getQueue(walGroupId);
  if (queue == null || queue.isEmpty()) {
    return;
  }
  queue.remove();
  // Decrease size logQueue.
  this.metrics.decrSizeOfLogQueue();
  // Re-compute age of oldest wal metric.
  this.metrics.setOldestWalAge(getOldestWalAge());
}
3.68
hbase_WALKey_getNonce
/** Returns The nonce */ default long getNonce() { return HConstants.NO_NONCE; }
3.68
hadoop_DumpUtil_bytesToHex
/**
 * Convert bytes into format like 0x02 02 00 80.
 * If limit is negative or too large, then all bytes will be converted.
 *
 * @param bytes bytes.
 * @param limit limit.
 * @return bytesToHex.
 */
public static String bytesToHex(byte[] bytes, int limit) {
  if (limit <= 0 || limit > bytes.length) {
    limit = bytes.length;
  }
  int len = limit * 2;
  len += limit; // for ' ' appended for each char
  len += 2; // for '0x' prefix
  char[] hexChars = new char[len];
  hexChars[0] = '0';
  hexChars[1] = 'x';
  for (int j = 0; j < limit; j++) {
    int v = bytes[j] & 0xFF;
    hexChars[j * 3 + 2] = HEX_CHARS[v >>> 4];
    hexChars[j * 3 + 3] = HEX_CHARS[v & 0x0F];
    hexChars[j * 3 + 4] = ' ';
  }
  return new String(hexChars);
}
3.68
hmily_HmilyRepositoryNode_getHmilyTransactionRootPath
/** * Get hmily transaction root path. * * @return hmily transaction root path */ public String getHmilyTransactionRootPath() { return Joiner.on("/").join("", ROOT_PATH_PREFIX, appName, HMILY_TRANSACTION_GLOBAL); }
3.68
flink_S3TestCredentials_getTestBucketUriWithScheme
/** * Gets the URI for the path under which all tests should put their data. * * <p>This method throws an exception if the bucket was not configured. Tests should use {@link * #assumeCredentialsAvailable()} to skip tests when credentials are not available. */ public static String getTestBucketUriWithScheme(String scheme) { if (S3_TEST_BUCKET != null) { return scheme + "://" + S3_TEST_BUCKET + "/temp/"; } else { throw new IllegalStateException("S3 test bucket not available"); } }
3.68
flink_SchemaValidator_deriveTableSinkSchema
/**
 * Derives the table schema for a table sink. A sink ignores a proctime attribute and needs to
 * track the origin of a rowtime field.
 *
 * @deprecated This method combines two separate concepts of table schema and field mapping.
 *     This should be split into two methods once we have support for the corresponding
 *     interfaces (see FLINK-9870).
 */
@Deprecated
public static TableSchema deriveTableSinkSchema(DescriptorProperties properties) {
    TableSchema.Builder builder = TableSchema.builder();

    TableSchema tableSchema = properties.getTableSchema(SCHEMA);

    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        final TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        final String fieldName = tableColumn.getName();
        final DataType dataType = tableColumn.getType();
        if (!tableColumn.isPhysical()) {
            // skip non-physical column
            continue;
        }
        boolean isProctime =
                properties
                        .getOptionalBoolean(SCHEMA + "." + i + "." + SCHEMA_PROCTIME)
                        .orElse(false);
        String tsType = SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_TYPE;
        boolean isRowtime = properties.containsKey(tsType);
        if (!isProctime && !isRowtime) {
            // check for a aliasing
            String aliasName =
                    properties
                            .getOptionalString(SCHEMA + "." + i + "." + SCHEMA_FROM)
                            .orElse(fieldName);
            builder.field(aliasName, dataType);
        }
        // only use the rowtime attribute if it references a field
        else if (isRowtime) {
            switch (properties.getString(tsType)) {
                case ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD:
                    String field =
                            properties.getString(
                                    SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_FROM);
                    builder.field(field, dataType);
                    break;
                // other timestamp strategies require a reverse timestamp extractor to
                // insert the timestamp into the output
                default:
                    throw new TableException(
                            format(
                                    "Unsupported rowtime type '%s' for sink"
                                            + " table schema. Currently only '%s' is supported for table sinks.",
                                    dataType, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD));
            }
        }
    }

    return builder.build();
}
3.68
dubbo_AbstractJSONImpl_getListOfObjects
/** * Gets a list from an object for the given key, and verifies all entries are objects. If the key * is not present, this returns null. If the value is not a List or an entry is not an object, * throws an exception. */ @Override public List<Map<String, ?>> getListOfObjects(Map<String, ?> obj, String key) { assert obj != null; List<?> list = getList(obj, key); if (list == null) { return null; } return checkObjectList(list); }
3.68
querydsl_SQLExpressions_stddevDistinct
/** * returns the sample standard deviation of expr, a set of numbers. * * @param expr argument * @return stddev(distinct expr) */ public static <T extends Number> WindowOver<T> stddevDistinct(Expression<T> expr) { return new WindowOver<T>(expr.getType(), SQLOps.STDDEV_DISTINCT, expr); }
3.68
framework_AbstractExtension_internalSetParent
/**
 * Actually sets the parent and calls required listeners.
 *
 * @since 7.1
 * @param parent
 *            The parent to set
 */
private void internalSetParent(ClientConnector parent) {
    ClientConnector oldParent = getParent();

    // Send a detach event if the component is currently attached
    if (isAttached()) {
        detach();
    }

    // Connect to new parent
    this.parent = parent;

    // Send attach event if the component is now attached
    if (isAttached()) {
        attach();
    }

    if (oldParent != null) {
        oldParent.markAsDirty();
    }
}
3.68
flink_MemorySegment_getLong
/**
 * Reads a long value (64bit, 8 bytes) from the given position, in the system's native byte
 * order. This method offers the best speed for long integer reading and should be used unless a
 * specific byte order is required. In most cases, it suffices to know that the byte order in
 * which the value is written is the same as the one in which it is read (such as transient
 * storage in memory, or serialization for I/O and network), making this method the preferable
 * choice.
 *
 * @param index The position from which the value will be read.
 * @return The long value at the given position.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 8.
 */
public long getLong(int index) {
    final long pos = address + index;
    if (index >= 0 && pos <= addressLimit - 8) {
        return UNSAFE.getLong(heapMemory, pos);
    } else if (address > addressLimit) {
        throw new IllegalStateException("segment has been freed");
    } else {
        // index is in fact invalid
        throw new IndexOutOfBoundsException();
    }
}
3.68
shardingsphere-elasticjob_JobFacade_checkJobExecutionEnvironment
/** * Check job execution environment. * * @throws JobExecutionEnvironmentException job execution environment exception */ public void checkJobExecutionEnvironment() throws JobExecutionEnvironmentException { configService.checkMaxTimeDiffSecondsTolerable(); }
3.68
hadoop_Chunk_getRemain
/** * How many bytes remain in the current chunk? * * @return remaining bytes left in the current chunk. * @throws java.io.IOException */ public int getRemain() throws IOException { checkEOF(); return remain; }
3.68
flink_FileSystem_loadFileSystemFactories
/**
 * Loads the factories for the file systems directly supported by Flink. Aside from the {@link
 * LocalFileSystem}, these file systems are loaded via Java's service framework.
 *
 * @return A map from the file system scheme to corresponding file system factory.
 */
private static List<FileSystemFactory> loadFileSystemFactories(
        Collection<Supplier<Iterator<FileSystemFactory>>> factoryIteratorsSuppliers) {

    final ArrayList<FileSystemFactory> list = new ArrayList<>();

    // by default, we always have the local file system factory
    list.add(new LocalFileSystemFactory());

    LOG.debug("Loading extension file systems via services");

    for (Supplier<Iterator<FileSystemFactory>> factoryIteratorsSupplier :
            factoryIteratorsSuppliers) {
        try {
            addAllFactoriesToList(factoryIteratorsSupplier.get(), list);
        } catch (Throwable t) {
            // catching Throwable here to handle various forms of class loading
            // and initialization errors
            ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
            LOG.error("Failed to load additional file systems via services", t);
        }
    }

    return Collections.unmodifiableList(list);
}
3.68
shardingsphere-elasticjob_JobRegistry_isJobRunning
/** * Judge job is running or not. * * @param jobName job name * @return job is running or not */ public boolean isJobRunning(final String jobName) { return jobRunningMap.getOrDefault(jobName, false); }
3.68
morf_InjectMembersRule_apply
/** * @see org.junit.rules.MethodRule#apply(org.junit.runners.model.Statement, org.junit.runners.model.FrameworkMethod, java.lang.Object) */ @Override public Statement apply(final Statement base, final FrameworkMethod method, final Object target) { return new Statement() { @Override public void evaluate() throws Throwable { final List<Module> moduleWithTarget = new ArrayList<>(Arrays.asList(modules)); if (target instanceof Module) { moduleWithTarget.add((Module) target); } synchronized (InjectMembersRule.class) { Guice.createInjector(moduleWithTarget).injectMembers(target); try { base.evaluate(); } finally { ThreadSafeMockingProgress.mockingProgress().reset(); } } } }; }
3.68
flink_ResultPartition_isReleased
/** * Whether this partition is released. * * <p>A partition is released when each subpartition is either consumed and communication is * closed by consumer or failed. A partition is also released if task is cancelled. */ @Override public boolean isReleased() { return isReleased.get(); }
3.68
framework_UIProvider_getTheme
/** * Finds the theme to use for a specific UI. If no specific theme is * required, <code>null</code> is returned. * <p> * The default implementation checks for a @{@link Theme} annotation on the * UI class. * * @param event * the UI create event with information about the UI and the * current request. * @return the name of the theme, or <code>null</code> if the default theme * should be used * */ public String getTheme(UICreateEvent event) { Theme uiTheme = getAnnotationFor(event.getUIClass(), Theme.class); if (uiTheme != null) { return uiTheme.value(); } else { return null; } }
3.68
hadoop_PairedDurationTrackerFactory_asDuration
/** * @return the global duration */ @Override public Duration asDuration() { return firstDuration.asDuration(); }
3.68
hbase_ZKTableArchiveClient_enableHFileBackupAsync
/** * Turn on backups for all HFiles for the given table. * <p> * All deleted hfiles are moved to the archive directory under the table directory, rather than * being deleted. * <p> * If backups are already enabled for this table, does nothing. * <p> * If the table does not exist, the archiving the table's hfiles is still enabled as a future * table with that name may be created shortly. * @param table name of the table to start backing up * @throws IOException if an unexpected exception occurs * @throws KeeperException if zookeeper can't be reached */ public void enableHFileBackupAsync(final byte[] table) throws IOException, KeeperException { createHFileArchiveManager().enableHFileBackup(table).stop(); }
3.68
framework_SingleSelectionModelImpl_doSetSelected
/** * Sets the selected item. If the item is {@code null}, clears the current * selection if any. * * @param item * the selected item or {@code null} to clear selection * @since 8.1 */ protected void doSetSelected(T item) { if (getParent() == null) { throw new IllegalStateException( "Trying to update selection for grid selection model that has been detached from the grid."); } if (selectedItem != null) { getGrid().getDataCommunicator().refresh(selectedItem); } selectedItem = item; if (selectedItem != null) { getGrid().getDataCommunicator().refresh(selectedItem); } }
3.68
framework_FilesystemContainer_addRoot
/** * Adds new root file directory. Adds a file to be included as root file * directory in the <code>FilesystemContainer</code>. * * @param root * the File to be added as root directory. Null values are * ignored. */ public void addRoot(File root) { if (root != null) { final File[] newRoots = new File[roots.length + 1]; for (int i = 0; i < roots.length; i++) { newRoots[i] = roots[i]; } newRoots[roots.length] = root; roots = newRoots; } }
3.68
flink_OptimizerNode_getUniqueFields
/** Gets the FieldSets which are unique in the output of the node. */ public Set<FieldSet> getUniqueFields() { return this.uniqueFields == null ? Collections.<FieldSet>emptySet() : this.uniqueFields; }
3.68
hbase_ZNodePaths_getMetaReplicaIdFromZNode
/** * Parse the meta replicaId from the passed znode * @param znode the name of the znode, does not include baseZNode */ public int getMetaReplicaIdFromZNode(String znode) { return znode.equals(metaZNodePrefix) ? RegionInfo.DEFAULT_REPLICA_ID : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); }
3.68
dubbo_ServiceDefinitionBuilder_build
/** * Describe a Java interface in {@link ServiceDefinition}. * * @return Service description */ public static ServiceDefinition build(final Class<?> interfaceClass) { ServiceDefinition sd = new ServiceDefinition(); build(sd, interfaceClass); return sd; }
3.68
hbase_StoreFileWriter_build
/**
 * Create a store file writer. Client is responsible for closing file when done. If metadata,
 * add BEFORE closing using {@link StoreFileWriter#appendMetadata}.
 */
public StoreFileWriter build() throws IOException {
  if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
    throw new IllegalArgumentException("Either specify parent directory " + "or file path");
  }

  if (dir == null) {
    dir = filePath.getParent();
  }

  if (!fs.exists(dir)) {
    // Handle permission for non-HDFS filesystem properly
    // See HBASE-17710
    HRegionFileSystem.mkdirs(fs, conf, dir);
  }

  // set block storage policy for temp path
  String policyName = this.conf.get(ColumnFamilyDescriptorBuilder.STORAGE_POLICY);
  if (null == policyName) {
    policyName = this.conf.get(HStore.BLOCK_STORAGE_POLICY_KEY);
  }
  CommonFSUtils.setStoragePolicy(this.fs, dir, policyName);

  if (filePath == null) {
    // The stored file and related blocks will used the directory based StoragePolicy.
    // Because HDFS DistributedFileSystem does not support create files with storage policy
    // before version 3.3.0 (See HDFS-13209). Use child dir here is to make stored files
    // satisfy the specific storage policy when writing. So as to avoid later data movement.
    // We don't want to change whole temp dir to 'fileStoragePolicy'.
    if (!Strings.isNullOrEmpty(fileStoragePolicy)) {
      dir = new Path(dir, HConstants.STORAGE_POLICY_PREFIX + fileStoragePolicy);
      if (!fs.exists(dir)) {
        HRegionFileSystem.mkdirs(fs, conf, dir);
        LOG.info(
          "Create tmp dir " + dir.toString() + " with storage policy: " + fileStoragePolicy);
      }
      CommonFSUtils.setStoragePolicy(this.fs, dir, fileStoragePolicy);
    }
    filePath = getUniqueFile(fs, dir);
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  // make sure we call this before actually create the writer
  // in fact, it is not a big deal to even add an inexistent file to the track, as we will never
  // try to delete it and finally we will clean the tracker up after compaction. But if the file
  // cleaner find the file but we haven't recorded it yet, it may accidentally delete the file
  // and cause problem.
  if (writerCreationTracker != null) {
    writerCreationTracker.accept(filePath);
  }
  return new StoreFileWriter(fs, filePath, conf, cacheConf, bloomType, maxKeyCount, favoredNodes,
    fileContext, shouldDropCacheBehind, compactedFilesSupplier);
}
3.68
flink_BinaryStringData_toLowerCase
/**
 * Converts all of the characters in this {@code BinaryStringData} to lower case.
 *
 * @return the {@code BinaryStringData}, converted to lowercase.
 */
public BinaryStringData toLowerCase() {
    if (javaObject != null) {
        return javaToLowerCase();
    }
    if (binarySection.sizeInBytes == 0) {
        return EMPTY_UTF8;
    }
    int size = binarySection.segments[0].size();
    BinaryStringData.SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
    byte[] bytes = new byte[binarySection.sizeInBytes];
    bytes[0] = (byte) Character.toTitleCase(segmentAndOffset.value());
    for (int i = 0; i < binarySection.sizeInBytes; i++) {
        byte b = segmentAndOffset.value();
        if (numBytesForFirstByte(b) != 1) {
            // fallback
            return javaToLowerCase();
        }
        int lower = Character.toLowerCase((int) b);
        if (lower > 127) {
            // fallback
            return javaToLowerCase();
        }
        bytes[i] = (byte) lower;
        segmentAndOffset.nextByte(size);
    }
    return fromBytes(bytes);
}
3.68
hadoop_CommitUtilsWithMR_getTempTaskAttemptPath
/** * Compute the path where the output of a given task attempt will be placed. * @param context task context * @param jobUUID unique Job ID. * @param out output directory of job * @return the path to store temporary job attempt data. */ public static Path getTempTaskAttemptPath(TaskAttemptContext context, final String jobUUID, Path out) { return new Path( getTempJobAttemptPath(jobUUID, out, getAppAttemptId(context)), String.valueOf(context.getTaskAttemptID())); }
3.68
hbase_ProcedureExecutor_getProcedures
/**
 * Get procedures.
 * @return the procedures in a list
 */
public List<Procedure<TEnvironment>> getProcedures() {
  List<Procedure<TEnvironment>> procedureList =
    new ArrayList<>(procedures.size() + completed.size());
  procedureList.addAll(procedures.values());
  // Note: The procedure could show up twice in the list with different state, as
  // it could complete after we walk through procedures list and insert into
  // procedureList - it is ok, as we will use the information in the Procedure
  // to figure it out; to prevent this would increase the complexity of the logic.
  completed.values().stream().map(CompletedProcedureRetainer::getProcedure)
    .forEach(procedureList::add);
  return procedureList;
}
3.68
flink_PushCalcPastChangelogNormalizeRule_projectUsedFieldsWithConditions
/**
 * Builds a new {@link StreamPhysicalCalc} on the input node with the given {@param conditions}
 * and a used fields projection.
 */
private StreamPhysicalCalc projectUsedFieldsWithConditions(
        RelBuilder relBuilder, RelNode input, List<RexNode> conditions, int[] usedFields) {
    final RelDataType inputRowType = input.getRowType();
    final List<String> inputFieldNames = inputRowType.getFieldNames();
    final RexProgramBuilder programBuilder =
            new RexProgramBuilder(inputRowType, relBuilder.getRexBuilder());

    // add project
    for (int fieldIndex : usedFields) {
        programBuilder.addProject(
                programBuilder.makeInputRef(fieldIndex), inputFieldNames.get(fieldIndex));
    }

    // add conditions
    final RexNode condition = relBuilder.and(conditions);
    if (!condition.isAlwaysTrue()) {
        programBuilder.addCondition(condition);
    }

    final RexProgram newProgram = programBuilder.getProgram();
    return new StreamPhysicalCalc(
            input.getCluster(),
            input.getTraitSet(),
            input,
            newProgram,
            newProgram.getOutputRowType());
}
3.68
hadoop_FilterFileSystem_listStatus
/** List files in a directory. */ @Override public FileStatus[] listStatus(Path f) throws IOException { return fs.listStatus(f); }
3.68
framework_Color_withinRange
/** * Checks whether the value is within the acceptable range of [0, 255]. * * @param value * @return true if the value falls within the range, false otherwise */ private boolean withinRange(int value) { if (value < 0 || value > 255) { return false; } return true; }
3.68
hbase_BulkLoadHFilesTool_groupOrSplitPhase
/**
 * @param conn the HBase cluster connection
 * @param tableName the table name of the table to load into
 * @param pool the ExecutorService
 * @param queue the queue for LoadQueueItem
 * @param startEndKeys start and end keys
 * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles.
 */
private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(
  AsyncClusterConnection conn, TableName tableName, ExecutorService pool,
  Deque<LoadQueueItem> queue, List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
  // <region start key, LQI> need synchronized only within this scope of this
  // phase because of the puts that happen in futures.
  Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
  final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
  Set<String> missingHFiles = new HashSet<>();
  Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair =
    new Pair<>(regionGroups, missingHFiles);

  // drain LQIs and figure out bulk load groups
  Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
  while (!queue.isEmpty()) {
    final LoadQueueItem item = queue.remove();

    final Callable<Pair<List<LoadQueueItem>, String>> call =
      () -> groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
    splittingFutures.add(pool.submit(call));
  }
  // get all the results. All grouping and splitting must finish before
  // we can attempt the atomic loads.
  for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
    try {
      Pair<List<LoadQueueItem>, String> splits = lqis.get();
      if (splits != null) {
        if (splits.getFirst() != null) {
          queue.addAll(splits.getFirst());
        } else {
          missingHFiles.add(splits.getSecond());
        }
      }
    } catch (ExecutionException e1) {
      Throwable t = e1.getCause();
      if (t instanceof IOException) {
        LOG.error("IOException during splitting", e1);
        throw (IOException) t; // would have been thrown if not parallelized,
      }
      LOG.error("Unexpected execution exception during splitting", e1);
      throw new IllegalStateException(t);
    } catch (InterruptedException e1) {
      LOG.error("Unexpected interrupted exception during splitting", e1);
      throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
    }
  }
  return pair;
}
3.68
flink_InputGate_getAvailableFuture
/** * @return a future that is completed if there are more records available. If there are more * records available immediately, {@link #AVAILABLE} should be returned. Previously returned * not completed futures should become completed once there are more records available. */ @Override public CompletableFuture<?> getAvailableFuture() { return availabilityHelper.getAvailableFuture(); }
3.68
querydsl_SQLTemplatesRegistry_getTemplates
/** * Get the SQLTemplates instance that matches best the SQL engine of the * given database metadata * * @param md database metadata * @return templates * @throws SQLException */ public SQLTemplates getTemplates(DatabaseMetaData md) throws SQLException { return getBuilder(md).build(); }
3.68
hadoop_DataNodeFaultInjector_interceptBlockReader
/** * Used as a hook to inject intercept When finish reading from block. */ public void interceptBlockReader() {}
3.68
framework_Window_isResizeLazy
/** * * @return true if a delay is used before recalculating sizes, false if * sizes are recalculated immediately. */ public boolean isResizeLazy() { return getState(false).resizeLazy; }
3.68
flink_RichInputFormat_closeInputFormat
/**
 * Closes this InputFormat instance. This method is called once per parallel instance. Resources
 * allocated during {@link #openInputFormat()} should be closed in this method.
 *
 * @see InputFormat
 * @throws IOException in case closing the resources failed
 */
@PublicEvolving
public void closeInputFormat() throws IOException {
    // do nothing here, just for subclasses
}
3.68
flink_WatermarkStrategy_forMonotonousTimestamps
/** * Creates a watermark strategy for situations with monotonously ascending timestamps. * * <p>The watermarks are generated periodically and tightly follow the latest timestamp in the * data. The delay introduced by this strategy is mainly the periodic interval in which the * watermarks are generated. * * @see AscendingTimestampsWatermarks */ static <T> WatermarkStrategy<T> forMonotonousTimestamps() { return (ctx) -> new AscendingTimestampsWatermarks<>(); }
3.68
hudi_HoodieTableConfig_getRecordMergerStrategy
/** * Read the payload class for HoodieRecords from the table properties. */ public String getRecordMergerStrategy() { return getStringOrDefault(RECORD_MERGER_STRATEGY); }
3.68
pulsar_ProducerConfiguration_setMessageRoutingMode
/** * Set the message routing mode for the partitioned producer. * * @param messageRouteMode message routing mode. * @return producer configuration * @see MessageRoutingMode */ public ProducerConfiguration setMessageRoutingMode(MessageRoutingMode messageRouteMode) { Objects.requireNonNull(messageRouteMode); conf.setMessageRoutingMode( org.apache.pulsar.client.api.MessageRoutingMode.valueOf(messageRouteMode.toString())); return this; }
3.68
morf_AbstractSqlDialectTest_tableName
/** * Many tests have common results apart from a table name decoration. This method allows for * those tests to be commonised and save a lot of duplication between descendent classes. * * <p>If no decoration is required for an SQL dialect descendant classes need not implement this method.</p> * * @param baseName Base table name. * @return Decorated name. */ protected String tableName(String baseName) { return baseName; }
3.68
framework_Table_addItem
/**
 * Adds the new row to table and fill the visible cells (except generated
 * columns) with given values.
 *
 * @param cells
 *            the Object array that is used for filling the visible cells
 *            new row. The types must be settable to visible column property
 *            types.
 * @param itemId
 *            the Id the new row. If null, a new id is automatically
 *            assigned. If given, the table cannot already have a item with
 *            given id.
 * @return Returns item id for the new row. Returns null if operation fails.
 */
public Object addItem(Object[] cells, Object itemId)
        throws UnsupportedOperationException {

    // remove generated columns from the list of columns being assigned
    final LinkedList<Object> availableCols = new LinkedList<Object>();
    for (Object id : visibleColumns) {
        if (!columnGenerators.containsKey(id)) {
            availableCols.add(id);
        }
    }
    // Checks that a correct number of cells are given
    if (cells.length != availableCols.size()) {
        return null;
    }

    // Creates new item
    Item item;
    if (itemId == null) {
        itemId = items.addItem();
        if (itemId == null) {
            return null;
        }
        item = items.getItem(itemId);
    } else {
        item = items.addItem(itemId);
    }
    if (item == null) {
        return null;
    }

    // Fills the item properties
    for (int i = 0; i < availableCols.size(); i++) {
        item.getItemProperty(availableCols.get(i)).setValue(cells[i]);
    }

    if (!(items instanceof Container.ItemSetChangeNotifier)) {
        refreshRowCache();
    }

    return itemId;
}
3.68
framework_AbstractComponent_isOrHasAncestor
/** * Determine whether a <code>content</code> component is equal to, or the * ancestor of this component. * * @param content * the potential ancestor element * @return <code>true</code> if the relationship holds */ protected boolean isOrHasAncestor(Component content) { if (content instanceof HasComponents) { for (Component parent = this; parent != null; parent = parent .getParent()) { if (parent.equals(content)) { return true; } } } return false; }
3.68
dubbo_TripleServerStream_responseErr
/** * Error in create stream, unsupported config or triple protocol error. There is no return value * because stream will be reset if send trailers failed. * * @param status status of error */ private void responseErr(TriRpcStatus status) { Http2Headers trailers = new DefaultHttp2Headers() .status(OK.codeAsText()) .set(HttpHeaderNames.CONTENT_TYPE, TripleConstant.CONTENT_PROTO) .setInt(TripleHeaderEnum.STATUS_KEY.getHeader(), status.code.code) .set(TripleHeaderEnum.MESSAGE_KEY.getHeader(), status.toEncodedMessage()); sendTrailers(trailers); }
3.68
flink_FlinkContainers_start
/** Starts all containers. */ public void start() throws Exception { if (haService != null) { LOG.debug("Starting HA service container"); this.haService.start(); } LOG.debug("Starting JobManager container"); this.jobManager.start(); waitUntilJobManagerRESTReachable(jobManager); LOG.debug("Starting TaskManager containers"); this.taskManagers.parallelStream().forEach(GenericContainer::start); LOG.debug("Creating REST cluster client"); this.restClusterClient = createClusterClient(); waitUntilAllTaskManagerConnected(); isStarted = true; }
3.68
hbase_NamespaceStateManager_deleteNamespace
/** * Delete the namespace state. * @param namespace the name of the namespace to delete */ void deleteNamespace(String namespace) { this.nsStateCache.remove(namespace); }
3.68
dubbo_MetadataInfo_init
/**
 * Initialize necessary caches right after deserialization on the consumer side
 */
protected void init() {
    buildMatchKey();
    buildServiceKey(name, group, version);
    // init method params
    this.methodParams = URLParam.initMethodParameters(params);
    // Actually, consumer params is empty after deserialized on the consumer side, so no need to initialize.
    // Check how InstanceAddressURL operates on consumer url for more detail.
    // this.consumerMethodParams = URLParam.initMethodParameters(consumerParams);
    // no need to init numbers for it's only for cache purpose
}
3.68
hbase_AsyncTable_deleteAll
/** * A simple version of batch delete. It will fail if there are any failures. * @param deletes list of things to delete. * @return A {@link CompletableFuture} that always returns null when complete normally. */ default CompletableFuture<Void> deleteAll(List<Delete> deletes) { return allOf(delete(deletes)).thenApply(r -> null); }
3.68
querydsl_JTSGeometryExpression_boundary
/** * Returns the closure of the combinatorial boundary of this geometric object * * @return boundary */ public JTSGeometryExpression<Geometry> boundary() { if (boundary == null) { boundary = JTSGeometryExpressions.geometryOperation(SpatialOps.BOUNDARY, mixin); } return boundary; }
3.68
framework_GridConnector_updateColumnsFromState
/** * Update columns from the current state. * */ private void updateColumnsFromState() { this.columnsUpdatedFromState = true; final List<Column<?, JsonObject>> columns = new ArrayList<Column<?, JsonObject>>(getState().columns.size()); for (String columnId : getState().columnOrder) { for (GridColumnState state : getState().columns) { if (state.id.equals(columnId)) { CustomGridColumn column = this.columnIdToColumn.get(state.id); if (column == null) { column = new CustomGridColumn(state); this.columnIdToColumn.put(state.id, column); this.columnOrder.add(state.id); columns.add(column); } else { updateColumnFromState(column, state); } } } } @SuppressWarnings("unchecked") final Column<?, JsonObject>[] columnArray = columns.toArray(new Column[0]); getWidget().addColumns(columnArray); this.columnsUpdatedFromState = false; }
3.68
hbase_HbckTableInfo_getTableDescriptor
/** Returns descriptor common to all regions. null if are none or multiple! */ TableDescriptor getTableDescriptor() { if (htds.size() == 1) { return (TableDescriptor) htds.toArray()[0]; } else { LOG.error( "None/Multiple table descriptors found for table '" + tableName + "' regions: " + htds); } return null; }
3.68
flink_WindowMapState_entries
/** * Returns all the mappings in the state. * * @return An iterable view of all the key-value pairs in the state. * @throws Exception Thrown if the system cannot access the state. */ public Iterable<Map.Entry<RowData, UV>> entries(W window) throws Exception { windowState.setCurrentNamespace(window); return windowState.entries(); }
3.68
hbase_AbstractFSWAL_init
/** * Used to initialize the WAL. Usually just call rollWriter to create the first log writer. */ @Override public void init() throws IOException { rollWriter(); }
3.68
graphhopper_NodeBasedNodeContractor_calculatePriority
/**
 * Warning: the calculated priority must NOT depend on priority(v) and therefore findAndHandleShortcuts should also not
 * depend on the priority(v). Otherwise updating the priority before contracting in contractNodes() could lead to
 * a slowish or even endless loop.
 */
@Override
public float calculatePriority(int node) {
    // # huge influence: the bigger the less shortcuts gets created and the faster is the preparation
    //
    // every adjNode has an 'original edge' number associated. initially it is r=1
    // when a new shortcut is introduced then r of the associated edges is summed up:
    // r(u,w)=r(u,v)+r(v,w) now we can define
    // originalEdgesCount = σ(v) := sum_{ (u,w) ∈ shortcuts(v) } of r(u, w)
    shortcutsCount = 0;
    originalEdgesCount = 0;
    findAndHandleShortcuts(node, this::countShortcuts, (int) (meanDegree * params.maxPollFactorHeuristic));
    // from shortcuts we can compute the edgeDifference
    // # low influence: with it the shortcut creation is slightly faster
    //
    // |shortcuts(v)| − |{(u, v) | v uncontracted}| − |{(v, w) | v uncontracted}|
    // meanDegree is used instead of outDegree+inDegree as if one adjNode is in both directions
    // only one bucket memory is used. Additionally one shortcut could also stand for two directions.
    int edgeDifference = shortcutsCount - prepareGraph.getDegree(node);
    // according to the paper do a simple linear combination of the properties to get the priority.
    return params.edgeDifferenceWeight * edgeDifference + params.originalEdgesCountWeight * originalEdgesCount;
    // todo: maybe use contracted-neighbors heuristic (contract nodes with lots of contracted neighbors later) as in GH 1.0 again?
    // maybe use hierarchy-depths heuristic as in edge-based?
}
3.68