name: string (lengths 12–178)
code_snippet: string (lengths 8–36.5k)
score: float64 (range 3.26–3.68)
framework_HierarchicalContainer_areChildrenAllowed
/*
 * Can the specified Item have any children? Don't add a JavaDoc comment
 * here, we use the default documentation from implemented interface.
 */
@Override
public boolean areChildrenAllowed(Object itemId) {
    if (noChildrenAllowed.contains(itemId)) {
        return false;
    }
    return containsId(itemId);
}
3.68
flink_AbstractColumnReader_afterReadPage
/** After reading a page, we may need some initialization. */
protected void afterReadPage() {}
3.68
hbase_MetricsConnection_incrDelayRunnersAndUpdateDelayInterval
/** Increment the delay runner count and update the delay interval of the delay runner. */
public void incrDelayRunnersAndUpdateDelayInterval(long interval) {
    this.runnerStats.incrDelayRunners();
    this.runnerStats.updateDelayInterval(interval);
}
3.68
morf_AbstractSqlDialectTest_testSelectDistinctForUpdate
/**
 * Tests that we can't combine DISTINCT with FOR UPDATE.
 */
@Test(expected = IllegalArgumentException.class)
public void testSelectDistinctForUpdate() {
    SelectStatement stmt = selectDistinct().from(new TableReference(TEST_TABLE)).forUpdate();
    testDialect.convertStatementToSQL(stmt);
}
3.68
framework_VaadinService_getHeartbeatTimeout
/**
 * Returns the number of seconds that must pass without a valid heartbeat or
 * UIDL request being received from a UI before that UI is removed from its
 * session. This is a lower bound; it might take longer to close an inactive
 * UI. Returns a negative number if heartbeat is disabled and timeout never
 * occurs.
 *
 * @see DeploymentConfiguration#getHeartbeatInterval()
 *
 * @since 7.0.0
 *
 * @return The heartbeat timeout in seconds or a negative number if timeout
 *         never occurs.
 */
private int getHeartbeatTimeout() {
    // Permit three missed heartbeats before closing the UI
    return (int) (getDeploymentConfiguration().getHeartbeatInterval() * (3.1));
}
3.68
framework_AbstractInMemoryContainer_filterAll
/**
 * Filter the view to recreate the visible item list from the unfiltered
 * items, and send a notification if the set of visible items changed in any
 * way.
 */
protected void filterAll() {
    if (doFilterContainer(!getFilters().isEmpty())) {
        fireItemSetChange();
    }
}
3.68
hadoop_ApplicationServiceRecordProcessor_createTXTInfo
/**
 * Create an application TXT record descriptor.
 *
 * @param serviceRecord the service record.
 * @throws Exception if there is an issue during descriptor creation.
 */
protected void createTXTInfo(ServiceRecord serviceRecord) throws Exception {
    List<Endpoint> endpoints = serviceRecord.external;
    List<RecordDescriptor> recordDescriptors = new ArrayList<>();
    TXTApplicationRecordDescriptor txtInfo;
    for (Endpoint endpoint : endpoints) {
        txtInfo = new TXTApplicationRecordDescriptor(serviceRecord, endpoint);
        recordDescriptors.add(txtInfo);
    }
    registerRecordDescriptor(Type.TXT, recordDescriptors);
}
3.68
morf_WithMetaDataAdapter_getSchema
/**
 * {@inheritDoc}
 *
 * @see org.alfasoftware.morf.dataset.DataSetProducer#getSchema()
 */
@Override
public Schema getSchema() {
    final Schema targetSchema = schemaProducer.getSchema();
    final Schema sourceSchema = super.getSchema();
    if (schema == null) {
        schema = new Schema() {

            @Override
            public Table getTable(String name) {
                return targetSchema.getTable(name);
            }

            @Override
            public boolean isEmptyDatabase() {
                return targetSchema.isEmptyDatabase();
            }

            @Override
            public boolean tableExists(String name) {
                return targetSchema.tableExists(name);
            }

            @Override
            public Collection<String> tableNames() {
                return sourceSchema.tableNames();
            }

            @Override
            public Collection<Table> tables() {
                Set<Table> tables = new HashSet<>();
                for (String tableName : tableNames()) {
                    tables.add(getTable(tableName));
                }
                return tables;
            }

            @Override
            public boolean viewExists(String name) {
                return targetSchema.viewExists(name);
            }

            @Override
            public View getView(String name) {
                return targetSchema.getView(name);
            }

            @Override
            public Collection<String> viewNames() {
                return targetSchema.viewNames();
            }

            @Override
            public Collection<View> views() {
                return targetSchema.views();
            }
        };
    }
    return schema;
}
3.68
framework_Validator_isInvisible
/**
 * Check if the error message should be hidden.
 *
 * An empty (null or "") message is invisible unless it contains nested
 * exceptions that are visible.
 *
 * @return true if the error message should be hidden, false otherwise
 */
public boolean isInvisible() {
    String msg = getMessage();
    if (msg != null && !msg.isEmpty()) {
        return false;
    }
    if (causes != null) {
        for (InvalidValueException e : causes) {
            if (!e.isInvisible()) {
                return false;
            }
        }
    }
    return true;
}
3.68
hbase_RequestConverter_buildEnableTableRequest
/**
 * Creates a protocol buffer EnableTableRequest.
 * @return an EnableTableRequest
 */
public static EnableTableRequest buildEnableTableRequest(final TableName tableName,
    final long nonceGroup, final long nonce) {
    EnableTableRequest.Builder builder = EnableTableRequest.newBuilder();
    builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
    builder.setNonceGroup(nonceGroup);
    builder.setNonce(nonce);
    return builder.build();
}
3.68
flink_MetadataOutputStreamWrapper_close
/**
 * Checks whether the output stream is still open. If it has already been
 * closed, this does nothing; otherwise it invokes {@code closeAction()} and
 * marks the stream as closed.
 */
final void close() throws IOException {
    if (closed) {
        return;
    }
    closeAction();
    closed = true;
}
3.68
pulsar_CompletableFutureCancellationHandler_createFuture
/**
 * Creates a new {@link CompletableFuture} and attaches the cancellation handler
 * to handle cancels and timeouts.
 *
 * @param <T> the result type of the future
 * @return a new future instance
 */
public <T> CompletableFuture<T> createFuture() {
    CompletableFuture<T> future = new CompletableFuture<>();
    attachToFuture(future);
    return future;
}
3.68
flink_LogicalSnapshot_create
/** Creates a LogicalSnapshot. */
public static LogicalSnapshot create(RelNode input, RexNode period) {
    final RelOptCluster cluster = input.getCluster();
    final RelMetadataQuery mq = cluster.getMetadataQuery();
    final RelTraitSet traitSet =
            cluster.traitSet()
                    .replace(Convention.NONE)
                    .replaceIfs(
                            RelCollationTraitDef.INSTANCE,
                            () -> RelMdCollation.snapshot(mq, input))
                    .replaceIf(
                            RelDistributionTraitDef.INSTANCE,
                            () -> RelMdDistribution.snapshot(mq, input));
    return new LogicalSnapshot(cluster, traitSet, input, period);
}
3.68
open-banking-gateway_PsuEncryptionServiceProvider_forPublicAndPrivateKey
/**
 * Public and private key (read/write) encryption.
 *
 * @param keyId Key ID
 * @param key Public-private key pair
 * @return Encryption service for both reading and writing
 */
public EncryptionService forPublicAndPrivateKey(UUID keyId, PubAndPrivKey key) {
    return oper.encryptionService(keyId.toString(), key.getPrivateKey(), key.getPublicKey());
}
3.68
hudi_BufferedRandomAccessFile_write
/**
 * Write the specified number of bytes into the buffer/file, with the given starting offset and length.
 * @param b - byte array with data to be written
 * @param off - starting offset
 * @param len - number of bytes to be written
 * @throws IOException
 */
@Override
public void write(byte[] b, int off, int len) throws IOException {
    // As all data may not fit into the buffer, more than one write would be required.
    while (len > 0) {
        int n = this.writeAtMost(b, off, len);
        off += n;
        len -= n;
        this.isDirty = true;
    }
}
3.68
hbase_HRegion_flushcache
/**
 * Flush the cache. When this method is called the cache will be flushed unless:
 * <ol>
 * <li>the cache is empty</li>
 * <li>the region is closed.</li>
 * <li>a flush is already in progress</li>
 * <li>writes are disabled</li>
 * </ol>
 * <p>
 * This method may block for some time, so it should not be called from a time-sensitive thread.
 * @param families stores of region to flush.
 * @param writeFlushRequestWalMarker whether to write the flush request marker to WAL
 * @param tracker used to track the life cycle of this flush
 * @return whether the flush is success and whether the region needs compacting
 * @throws IOException general io exceptions
 * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was
 *           not properly persisted. The region is put in closing mode, and the caller MUST abort
 *           after this.
 */
public FlushResultImpl flushcache(List<byte[]> families, boolean writeFlushRequestWalMarker,
    FlushLifeCycleTracker tracker) throws IOException {
    // fail-fast instead of waiting on the lock
    if (this.closing.get()) {
        String msg = "Skipping flush on " + this + " because closing";
        LOG.debug(msg);
        return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false);
    }
    MonitoredTask status = TaskMonitor.get().createStatus("Flushing " + this);
    status.setStatus("Acquiring readlock on region");
    // block waiting for the lock for flushing cache
    lock.readLock().lock();
    boolean flushed = true;
    try {
        if (this.closed.get()) {
            String msg = "Skipping flush on " + this + " because closed";
            LOG.debug(msg);
            status.abort(msg);
            flushed = false;
            return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false);
        }
        if (coprocessorHost != null) {
            status.setStatus("Running coprocessor pre-flush hooks");
            coprocessorHost.preFlush(tracker);
        }
        // TODO: this should be managed within memstore with the snapshot, updated only after flush
        // successful
        if (numMutationsWithoutWAL.sum() > 0) {
            numMutationsWithoutWAL.reset();
            dataInMemoryWithoutWAL.reset();
        }
        synchronized (writestate) {
            if (!writestate.flushing && writestate.writesEnabled) {
                this.writestate.flushing = true;
            } else {
                String msg = "NOT flushing " + this + " as "
                    + (writestate.flushing ? "already flushing" : "writes are not enabled");
                LOG.debug(msg);
                status.abort(msg);
                flushed = false;
                return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false);
            }
        }
        try {
            // The reason that we do not always use flushPolicy is, when the flush is
            // caused by logRoller, we should select stores which must be flushed
            // rather than could be flushed.
            Collection<HStore> specificStoresToFlush = null;
            if (families != null) {
                specificStoresToFlush = getSpecificStores(families);
            } else {
                specificStoresToFlush = flushPolicy.selectStoresToFlush();
            }
            FlushResultImpl fs =
                internalFlushcache(specificStoresToFlush, status, writeFlushRequestWalMarker, tracker);
            if (coprocessorHost != null) {
                status.setStatus("Running post-flush coprocessor hooks");
                coprocessorHost.postFlush(tracker);
            }
            if (fs.isFlushSucceeded()) {
                flushesQueued.reset();
            }
            status.markComplete("Flush successful " + fs.toString());
            return fs;
        } finally {
            synchronized (writestate) {
                writestate.flushing = false;
                this.writestate.flushRequested = false;
                writestate.notifyAll();
            }
        }
    } finally {
        lock.readLock().unlock();
        if (flushed) {
            // Don't log this journal stuff if no flush -- confusing.
            LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(),
                status.prettyPrintJournal());
        }
        status.cleanup();
    }
}
3.68
hbase_BulkLoadCellFilter_filterCell
/**
 * Filters the bulk load cell using the supplied predicate.
 * @param cell The WAL cell to filter.
 * @param famPredicate Returns true if the given family should be removed.
 * @return The filtered cell.
 */
public Cell filterCell(Cell cell, Predicate<byte[]> famPredicate) {
    byte[] fam;
    BulkLoadDescriptor bld = null;
    try {
        bld = WALEdit.getBulkLoadDescriptor(cell);
    } catch (IOException e) {
        LOG.warn("Failed to get bulk load events information from the WAL file.", e);
        return cell;
    }
    List<StoreDescriptor> storesList = bld.getStoresList();
    // Copy the StoreDescriptor list and update it, as storesList is an unmodifiable list
    List<StoreDescriptor> copiedStoresList = new ArrayList<>(storesList);
    Iterator<StoreDescriptor> copiedStoresListIterator = copiedStoresList.iterator();
    boolean anyStoreRemoved = false;
    while (copiedStoresListIterator.hasNext()) {
        StoreDescriptor sd = copiedStoresListIterator.next();
        fam = sd.getFamilyName().toByteArray();
        if (famPredicate.apply(fam)) {
            copiedStoresListIterator.remove();
            anyStoreRemoved = true;
        }
    }
    if (!anyStoreRemoved) {
        return cell;
    } else if (copiedStoresList.isEmpty()) {
        return null;
    }
    BulkLoadDescriptor.Builder newDesc = BulkLoadDescriptor.newBuilder()
        .setTableName(bld.getTableName())
        .setEncodedRegionName(bld.getEncodedRegionName())
        .setBulkloadSeqNum(bld.getBulkloadSeqNum());
    newDesc.addAllStores(copiedStoresList);
    BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build();
    return cellBuilder.clear().setRow(CellUtil.cloneRow(cell)).setFamily(WALEdit.METAFAMILY)
        .setQualifier(WALEdit.BULK_LOAD).setTimestamp(cell.getTimestamp()).setType(cell.getTypeByte())
        .setValue(newBulkLoadDescriptor.toByteArray()).build();
}
3.68
querydsl_NumberExpression_min
/**
 * Create a {@code min(this)} expression
 *
 * <p>Get the minimum value of this expression (aggregation)</p>
 *
 * @return min(this)
 */
@Override
public NumberExpression<T> min() {
    if (min == null) {
        min = Expressions.numberOperation(getType(), Ops.AggOps.MIN_AGG, mixin);
    }
    return min;
}
3.68
hbase_WALSplitUtil_hasRecoveredEdits
/**
 * Check whether there is recovered.edits in the region dir.
 * @param conf conf
 * @param regionInfo the region to check
 * @return true if recovered.edits exist in the region dir
 */
public static boolean hasRecoveredEdits(final Configuration conf, final RegionInfo regionInfo)
    throws IOException {
    // No recovered.edits for non default replica regions
    if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
        return false;
    }
    // Only default replica region can reach here, so we can use regioninfo
    // directly without converting it to default replica's regioninfo.
    Path regionWALDir =
        CommonFSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
    Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), regionInfo);
    Path wrongRegionWALDir =
        CommonFSUtils.getWrongWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
    FileSystem walFs = CommonFSUtils.getWALFileSystem(conf);
    FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf);
    NavigableSet<Path> files = getSplitEditFilesSorted(walFs, regionWALDir);
    if (!files.isEmpty()) {
        return true;
    }
    files = getSplitEditFilesSorted(rootFs, regionDir);
    if (!files.isEmpty()) {
        return true;
    }
    files = getSplitEditFilesSorted(walFs, wrongRegionWALDir);
    return !files.isEmpty();
}
3.68
framework_FileDownloader_extend
/**
 * Add this extension to the {@code EventTrigger}.
 *
 * @param eventTrigger
 *            the trigger to attach this extension to
 * @since 8.4
 */
public void extend(EventTrigger eventTrigger) {
    super.extend(eventTrigger.getConnector());
    getState().partInformation = eventTrigger.getPartInformation();
}
3.68
framework_VCalendarPanel_setRangeStart
/**
 * Sets the start range for this component. The start range is inclusive;
 * what is considered to be inside the range depends on the current
 * resolution.
 *
 * @param newRangeStart
 *            - the allowed range's start date
 */
public void setRangeStart(Date newRangeStart) {
    if (!SharedUtil.equals(rangeStart, newRangeStart)) {
        rangeStart = newRangeStart;
        if (initialRenderDone) {
            // Dynamic updates to the range need to re-render the calendar
            // to update the element stylenames
            renderCalendar();
        }
    }
}
3.68
hadoop_UpdateContainerSchedulerEvent_isResourceChange
/**
 * Whether this container update is a resource change.
 * @return true if the update is a resource change.
 */
public boolean isResourceChange() {
    return containerEvent.isResourceChange();
}
3.68
hadoop_ReplicaUnderConstruction_setChosenAsPrimary
/**
 * Set whether this replica was chosen for recovery.
 */
void setChosenAsPrimary(boolean chosenAsPrimary) {
    this.chosenAsPrimary = chosenAsPrimary;
}
3.68
hbase_Mutation_toMap
/**
 * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
 * Map along with the fingerprinted information. Useful for debugging, logging, and administration
 * tools.
 * @param maxCols a limit on the number of columns output prior to truncation
 */
@Override
public Map<String, Object> toMap(int maxCols) {
    // we start with the fingerprint map and build on top of it.
    Map<String, Object> map = getFingerprint();
    // replace the fingerprint's simple list of families with a
    // map from column families to lists of qualifiers and kv details
    Map<String, List<Map<String, Object>>> columns = new HashMap<>();
    map.put("families", columns);
    map.put("row", Bytes.toStringBinary(this.row));
    int colCount = 0;
    // iterate through all column families affected
    for (Map.Entry<byte[], List<Cell>> entry : getFamilyCellMap().entrySet()) {
        // map from this family to details for each cell affected within the family
        List<Map<String, Object>> qualifierDetails = new ArrayList<>();
        columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails);
        colCount += entry.getValue().size();
        if (maxCols <= 0) {
            continue;
        }
        // add details for each cell
        for (Cell cell : entry.getValue()) {
            if (--maxCols <= 0) {
                continue;
            }
            Map<String, Object> cellMap = cellToStringMap(cell);
            qualifierDetails.add(cellMap);
        }
    }
    map.put("totalColumns", colCount);
    // add the id if set
    if (getId() != null) {
        map.put("id", getId());
    }
    // Add the TTL if set
    // Long.MAX_VALUE is the default, and is interpreted to mean this attribute
    // has not been set.
    if (getTTL() != Long.MAX_VALUE) {
        map.put("ttl", getTTL());
    }
    map.put("ts", this.ts);
    return map;
}
3.68
hbase_WAL_getEdit
/**
 * Gets the edit.
 */
public WALEdit getEdit() {
    return edit;
}
3.68
hbase_SaslServerAuthenticationProviders_getNumRegisteredProviders
/**
 * Returns the number of registered providers.
 */
public int getNumRegisteredProviders() {
    return providers.size();
}
3.68
flink_DoubleMinimum_add
/** Consider using {@link #add(double)} instead for primitive double values. */
@Override
public void add(Double value) {
    this.min = Math.min(this.min, value);
}
3.68
hbase_DefaultVisibilityLabelServiceImpl_matchUnSortedVisibilityTags
/**
 * @param putVisTags Visibility tags in Put Mutation
 * @param deleteVisTags Visibility tags in Delete Mutation
 * @return true when all the visibility tags in Put match the visibility tags in Delete. This
 *         is used when at least one set of tags is not sorted based on the label ordinal.
 */
private static boolean matchUnSortedVisibilityTags(List<Tag> putVisTags, List<Tag> deleteVisTags)
    throws IOException {
    return compareTagsOrdinals(sortTagsBasedOnOrdinal(putVisTags),
        sortTagsBasedOnOrdinal(deleteVisTags));
}
3.68
hbase_CellUtil_copyRow
/**
 * Copies the row to a new byte[].
 * @param cell the cell from which the row has to be copied
 * @return the byte[] containing the row
 */
public static byte[] copyRow(Cell cell) {
    if (cell instanceof ByteBufferExtendedCell) {
        return ByteBufferUtils.copyOfRange(((ByteBufferExtendedCell) cell).getRowByteBuffer(),
            ((ByteBufferExtendedCell) cell).getRowPosition(),
            ((ByteBufferExtendedCell) cell).getRowPosition() + cell.getRowLength());
    } else {
        return Arrays.copyOfRange(cell.getRowArray(), cell.getRowOffset(),
            cell.getRowOffset() + cell.getRowLength());
    }
}
3.68
flink_OperatorChain_createOperatorChain
/**
 * Recursively create the chain of operators that starts from the given {@code operatorConfig}.
 * Operators are created tail to head and wrapped into a {@link WatermarkGaugeExposingOutput}.
 */
private <IN, OUT> WatermarkGaugeExposingOutput<StreamRecord<IN>> createOperatorChain(
        StreamTask<OUT, ?> containingTask,
        StreamConfig prevOperatorConfig,
        StreamConfig operatorConfig,
        Map<Integer, StreamConfig> chainedConfigs,
        ClassLoader userCodeClassloader,
        Map<IntermediateDataSetID, RecordWriterOutput<?>> recordWriterOutputs,
        List<StreamOperatorWrapper<?, ?>> allOperatorWrappers,
        OutputTag<IN> outputTag,
        MailboxExecutorFactory mailboxExecutorFactory,
        boolean shouldAddMetricForPrevOperator) {
    // create the output that the operator writes to first. this may recursively create more
    // operators
    WatermarkGaugeExposingOutput<StreamRecord<OUT>> chainedOperatorOutput =
            createOutputCollector(
                    containingTask,
                    operatorConfig,
                    chainedConfigs,
                    userCodeClassloader,
                    recordWriterOutputs,
                    allOperatorWrappers,
                    mailboxExecutorFactory,
                    true);

    OneInputStreamOperator<IN, OUT> chainedOperator =
            createOperator(
                    containingTask,
                    operatorConfig,
                    userCodeClassloader,
                    chainedOperatorOutput,
                    allOperatorWrappers,
                    false);

    return wrapOperatorIntoOutput(
            chainedOperator,
            containingTask,
            prevOperatorConfig,
            operatorConfig,
            userCodeClassloader,
            outputTag,
            shouldAddMetricForPrevOperator);
}
3.68
hadoop_TimelineEntity_addPrimaryFilter
/**
 * Add a single piece of primary filter to the existing primary filter map.
 *
 * @param key
 *          the primary filter key
 * @param value
 *          the primary filter value
 */
public void addPrimaryFilter(String key, Object value) {
    Set<Object> thisPrimaryFilter = primaryFilters.get(key);
    if (thisPrimaryFilter == null) {
        thisPrimaryFilter = new HashSet<Object>();
        primaryFilters.put(key, thisPrimaryFilter);
    }
    thisPrimaryFilter.add(value);
}
3.68
flink_RocksDBStateBackend_setPriorityQueueStateType
/**
 * Sets the type of the priority queue state. It will fall back to the default value if it is
 * not explicitly set.
 */
public void setPriorityQueueStateType(PriorityQueueStateType priorityQueueStateType) {
    rocksDBStateBackend.setPriorityQueueStateType(
            LegacyEnumBridge.convert(priorityQueueStateType));
}
3.68
framework_VLayoutSlot_getWidget
/**
 * Returns the widget that this slot contains.
 *
 * @return the child widget, cannot be {@code null}
 */
public Widget getWidget() {
    return widget;
}
3.68
flink_MultipleParameterTool_has
/** Check if value is set. */
@Override
public boolean has(String value) {
    addToDefaults(value, null);
    unrequestedParameters.remove(value);
    return data.containsKey(value);
}
3.68
flink_ResourceProfile_setExtendedResources
/**
 * Add the given extended resources. This will discard all the previously added extended
 * resources.
 */
public Builder setExtendedResources(Collection<ExternalResource> extendedResources) {
    this.extendedResources =
            extendedResources.stream()
                    .collect(
                            Collectors.toMap(
                                    ExternalResource::getName, Function.identity()));
    return this;
}
3.68
hadoop_TracingContext_getPrimaryRequestIdForHeader
/**
 * Provide the value to be used as the primaryRequestId part of the
 * x-ms-client-request-id header.
 * @param isRetry whether this is a retry iteration.
 * @return {@link #primaryRequestIdForRetry} if {@link #primaryRequestId}
 *         is an empty string and it's a retry iteration;
 *         {@link #primaryRequestId} for other cases.
 */
private String getPrimaryRequestIdForHeader(final Boolean isRetry) {
    if (!primaryRequestId.isEmpty() || !isRetry) {
        return primaryRequestId;
    }
    return primaryRequestIdForRetry;
}
3.68
framework_AbstractComponentContainer_addComponents
/*
 * (non-Javadoc)
 *
 * @see
 * com.vaadin.ui.ComponentContainer#addComponents(com.vaadin.ui.Component[])
 */
@Override
public void addComponents(Component... components) {
    for (Component c : components) {
        addComponent(c);
    }
}
3.68
framework_Escalator_getRowWithFocus
/**
 * Get the {@literal <tbody>} row that contains (or has) focus.
 *
 * @return The {@literal <tbody>} row that contains a focused DOM
 *         element, or <code>null</code> if focus is outside of a body
 *         row.
 */
private TableRowElement getRowWithFocus() {
    TableRowElement rowContainingFocus = null;
    final Element focusedElement = WidgetUtil.getFocusedElement();
    if (focusedElement != null && root.isOrHasChild(focusedElement)) {
        Element e = focusedElement;
        while (e != null && e != root) {
            /*
             * You never know if there's several tables embedded in a
             * cell... We'll take the deepest one.
             */
            if (TableRowElement.is(e)) {
                rowContainingFocus = TableRowElement.as(e);
            }
            e = e.getParentElement();
        }
    }
    return rowContainingFocus;
}
3.68
querydsl_JTSGeometryExpression_distanceSphere
// TODO maybe move out
public NumberExpression<Double> distanceSphere(Expression<? extends Geometry> geometry) {
    return Expressions.numberOperation(Double.class, SpatialOps.DISTANCE_SPHERE, mixin, geometry);
}
3.68
flink_Hardware_getSizeOfPhysicalMemoryForWindows
/**
 * Returns the size of the physical memory in bytes on Windows.
 *
 * @return the size of the physical memory in bytes or {@code -1}, if the size could not be
 *     determined
 */
private static long getSizeOfPhysicalMemoryForWindows() {
    BufferedReader bi = null;
    try {
        Process proc = Runtime.getRuntime().exec("wmic memorychip get capacity");
        bi =
                new BufferedReader(
                        new InputStreamReader(proc.getInputStream(), StandardCharsets.UTF_8));
        String line = bi.readLine();
        if (line == null) {
            return -1L;
        }
        if (!line.startsWith("Capacity")) {
            return -1L;
        }
        long sizeOfPhysicalMemory = 0L;
        while ((line = bi.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }
            line = line.replaceAll(" ", "");
            sizeOfPhysicalMemory += Long.parseLong(line);
        }
        return sizeOfPhysicalMemory;
    } catch (Throwable t) {
        LOG.error(
                "Cannot determine the size of the physical memory for Windows host "
                        + "(using 'wmic memorychip')",
                t);
        return -1L;
    } finally {
        if (bi != null) {
            try {
                bi.close();
            } catch (Throwable ignored) {
            }
        }
    }
}
3.68
framework_VTabsheet_hasScrolledTabs
/**
 * Checks whether there are any tabs scrolled out of view that could be
 * scrolled back into (not hidden on the server). If no such tabs are
 * scrolled out, this check returns {@code false}. Disabled but
 * visible-on-server tabs count as viewable.
 *
 * @return {@code true} if any viewable tabs are scrolled out of view,
 *         {@code false} otherwise
 */
private boolean hasScrolledTabs() {
    return scrollerIndex > 0 && scrollerIndex > tb.getFirstVisibleTab();
}
3.68
framework_Escalator_getMinCellWidth
/**
 * This is an internal method for calculating the minimum width for column
 * resize.
 *
 * @param colIndex the index of the column
 * @return minimum width for the column
 */
double getMinCellWidth(int colIndex) {
    return columnConfiguration.getMinCellWidth(colIndex);
}
3.68
MagicPlugin_EntityExtraData_spawn
// This is only used for specific entity types that require special spawning
public SpawnedEntityExtraData spawn(Location location) {
    return null;
}
3.68
hadoop_GetContentSummaryOperation_execute
/**
 * Return the {@link ContentSummary} of a given path.
 * @return the summary.
 * @throws FileNotFoundException if the path does not resolve
 * @throws IOException failure
 */
@Override
@Retries.RetryTranslated
public ContentSummary execute() throws IOException {
    FileStatus status = probePathStatusOrNull(path, StatusProbeEnum.FILE);
    if (status != null && status.isFile()) {
        // f is a file
        long length = status.getLen();
        return new ContentSummary.Builder().length(length)
            .fileCount(1).directoryCount(0).spaceConsumed(length).build();
    }
    final ContentSummary summary = getDirSummary(path);
    // Log the IOStatistics at debug so the cost of the operation
    // can be made visible.
    LOG.debug("IOStatistics of getContentSummary({}):\n{}", path, iostatistics);
    return summary;
}
3.68
AreaShop_DelfriendCommand_canUse
/**
 * Check if a person can remove friends.
 * @param person The person to check
 * @param region The region to check for
 * @return true if the person can remove friends, otherwise false
 */
public static boolean canUse(CommandSender person, GeneralRegion region) {
    if(person.hasPermission("areashop.delfriendall")) {
        return true;
    }
    if(person instanceof Player) {
        Player player = (Player)person;
        return region.isOwner(player) && player.hasPermission("areashop.delfriend");
    }
    return false;
}
3.68
hbase_AbstractFSWAL_skipRemoteWAL
// Allow temporarily skipping the creation of remote writer. When failing to write to the remote
// dfs cluster, we need to reopen the regions and switch to use the original wal writer. But we
// need to write a close marker when closing a region, and if it fails, the whole rs will abort.
// So here we need to skip the creation of remote writer and make it possible to write the region
// close marker.
// Setting markerEditOnly to true is for transiting from A to S, where we need to give up writing
// any pending wal entries as they will be discarded. The remote cluster will replicate the
// correct data back later. We still need to allow writing marker edits such as the close region
// event to allow closing a region.
@Override
public void skipRemoteWAL(boolean markerEditOnly) {
    if (markerEditOnly) {
        this.markerEditOnly = true;
    }
    this.skipRemoteWAL = true;
}
3.68
hudi_DFSPropertiesConfiguration_loadGlobalProps
/**
 * Load global props from hudi-defaults.conf, which is under class loader or CONF_FILE_DIR_ENV_NAME.
 * @return Typed Properties
 */
public static TypedProperties loadGlobalProps() {
    DFSPropertiesConfiguration conf = new DFSPropertiesConfiguration();

    // First try loading the external config file from class loader
    URL configFile = Thread.currentThread().getContextClassLoader().getResource(DEFAULT_PROPERTIES_FILE);
    if (configFile != null) {
        try (BufferedReader br = new BufferedReader(new InputStreamReader(configFile.openStream()))) {
            conf.addPropsFromStream(br, new Path(configFile.toURI()));
            return conf.getProps();
        } catch (URISyntaxException e) {
            throw new HoodieException(String.format("Provided props file url is invalid %s", configFile), e);
        } catch (IOException ioe) {
            throw new HoodieIOException(
                String.format("Failed to read %s from class loader", DEFAULT_PROPERTIES_FILE), ioe);
        }
    }

    // Try loading the external config file from local file system
    Option<Path> defaultConfPath = getConfPathFromEnv();
    if (defaultConfPath.isPresent()) {
        conf.addPropsFromFile(defaultConfPath.get());
    } else {
        try {
            conf.addPropsFromFile(DEFAULT_PATH);
        } catch (Exception e) {
            LOG.warn("Cannot load default config file: " + DEFAULT_PATH, e);
        }
    }
    return conf.getProps();
}
3.68
hadoop_JobTokenSecretManager_retrieveTokenSecret
/**
 * Look up the token password/secret for the given jobId.
 * @param jobId the jobId to look up
 * @return token password/secret as SecretKey
 * @throws InvalidToken if no token exists for the given jobId
 */
public SecretKey retrieveTokenSecret(String jobId) throws InvalidToken {
    SecretKey tokenSecret = null;
    synchronized (currentJobTokens) {
        tokenSecret = currentJobTokens.get(jobId);
    }
    if (tokenSecret == null) {
        throw new InvalidToken("Can't find job token for job " + jobId + " !!");
    }
    return tokenSecret;
}
3.68
hbase_GroupingTableMapper_initJob
/**
 * Use this before submitting a TableMap job. It will appropriately set up the job.
 * @param table The table to be processed.
 * @param scan The scan with the columns etc.
 * @param groupColumns A space separated list of columns used to form the key used in collect.
 * @param mapper The mapper class.
 * @param job The current job.
 * @throws IOException When setting up the job fails.
 */
@SuppressWarnings("unchecked")
public static void initJob(String table, Scan scan, String groupColumns,
    Class<? extends TableMapper> mapper, Job job) throws IOException {
    TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class,
        Result.class, job);
    job.getConfiguration().set(GROUP_COLUMNS, groupColumns);
}
3.68
flink_Configuration_containsKey
/**
 * Checks whether there is an entry with the specified key.
 *
 * @param key key of entry
 * @return true if the key is stored, false otherwise
 */
public boolean containsKey(String key) {
    synchronized (this.confData) {
        return this.confData.containsKey(key);
    }
}
3.68
framework_Table_removeAllActionHandlers
/**
 * Removes all action handlers.
 */
public void removeAllActionHandlers() {
    actionHandlers = null;
    actionMapper = null;
    // Ensures the visual refresh. No need to reset the page buffer
    // before as the content has not changed, only the action
    // handlers.
    refreshRenderedCells();
}
3.68
hbase_RequestConverter_buildGetNamespaceDescriptorRequest
/**
 * Creates a protocol buffer GetNamespaceDescriptorRequest.
 * @return a GetNamespaceDescriptorRequest
 */
public static GetNamespaceDescriptorRequest
    buildGetNamespaceDescriptorRequest(final String name) {
    GetNamespaceDescriptorRequest.Builder builder = GetNamespaceDescriptorRequest.newBuilder();
    builder.setNamespaceName(name);
    return builder.build();
}
3.68
morf_SqlScriptExecutor_executeInternal
/**
 * @param sql the sql statement to run.
 * @param connection Database against which to run SQL statements.
 * @return The number of rows updated/affected by this statement
 * @throws SQLException throws an exception for statement errors.
 */
private int executeInternal(String sql, Connection connection) throws SQLException {
    visitor.beforeExecute(sql);
    int numberOfRowsUpdated = 0;
    try {
        // Skip comments
        if (sqlDialect.sqlIsComment(sql)) {
            return 0;
        }
        try (Statement statement = connection.createStatement()) {
            if (log.isDebugEnabled()) log.debug("Executing SQL [" + sql + "]");
            boolean result = statement.execute(sql);
            if (!result) {
                numberOfRowsUpdated = statement.getUpdateCount();
            }
            if (log.isDebugEnabled()) log.debug("SQL resulted in [" + numberOfRowsUpdated + "] rows updated");
        } catch (Exception e) {
            throw reclassifiedRuntimeException(e, "Error executing SQL [" + sql + "]");
        }
        return numberOfRowsUpdated;
    } finally {
        visitor.afterExecute(sql, numberOfRowsUpdated);
    }
}
3.68
hbase_Addressing_parsePort
/**
 * Parse the port portion of a host-and-port string.
 * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
 * @return The port portion of <code>hostAndPort</code>
 */
public static int parsePort(final String hostAndPort) {
    int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR);
    if (colonIndex < 0) {
        throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
    }
    return Integer.parseInt(hostAndPort.substring(colonIndex + 1));
}
3.68
hadoop_BaseRecord_checkExpired
/**
 * Called when the modification time and current time are available; checks
 * for expirations.
 *
 * @param currentTime The current timestamp in ms from the data store, to be
 *          compared against the modification and creation dates of the
 *          object.
 * @return boolean True if the record has been updated and should be
 *         committed to the data store. Override for customized behavior.
 */
public boolean checkExpired(long currentTime) {
    long expiration = getExpirationMs();
    long modifiedTime = getDateModified();
    if (modifiedTime > 0 && expiration > 0) {
        return (modifiedTime + expiration) < currentTime;
    }
    return false;
}
3.68
cron-utils_CronDefinitionBuilder_withSupportedNicknameReboot
/**
 * Supports cron nickname @reboot
 *
 * @return this CronDefinitionBuilder instance
 */
public CronDefinitionBuilder withSupportedNicknameReboot() {
    cronNicknames.add(CronNicknames.REBOOT);
    return this;
}
3.68
hbase_ByteBufferOutputStream_getByteBuffer
/**
 * This flips the underlying BB so be sure to use it _last_!
 */
public ByteBuffer getByteBuffer() {
    curBuf.flip();
    return curBuf;
}
3.68
pulsar_BlobStoreBackedReadHandleImpl_getState
// for testing
State getState() {
    return this.state;
}
3.68
hudi_HoodiePipeline_source
/**
 * Returns the data stream source with the given catalog table.
 *
 * @param execEnv The execution environment
 * @param tablePath The table path to the hoodie table in the catalog
 * @param catalogTable The hoodie catalog table
 */
private static DataStream<RowData> source(StreamExecutionEnvironment execEnv,
    ObjectIdentifier tablePath, ResolvedCatalogTable catalogTable) {
    FactoryUtil.DefaultDynamicTableContext context = Utils.getTableContext(tablePath, catalogTable,
        Configuration.fromMap(catalogTable.getOptions()));
    HoodieTableFactory hoodieTableFactory = new HoodieTableFactory();
    DataStreamScanProvider dataStreamScanProvider =
        (DataStreamScanProvider) ((ScanTableSource) hoodieTableFactory
            .createDynamicTableSource(context))
            .getScanRuntimeProvider(new ScanRuntimeProviderContext());
    return dataStreamScanProvider.produceDataStream(execEnv);
}
3.68
pulsar_RateLimiter_getRate
/**
 * Returns configured permit rate per pre-configured rate-period.
 *
 * @return rate
 */
public synchronized long getRate() {
    return this.permits;
}
3.68
framework_VaadinService_removeSessionInitListener
/**
 * Removes a Vaadin service session initialization listener from this
 * service.
 *
 * @see #addSessionInitListener(SessionInitListener)
 *
 * @param listener
 *            the Vaadin service session initialization listener to remove.
 * @deprecated use the {@link Registration} object returned by
 *             {@link #addSessionInitListener(SessionInitListener)} to
 *             remove the listener
 */
@Deprecated
public void removeSessionInitListener(SessionInitListener listener) {
    sessionInitListeners.remove(listener);
}
3.68
hbase_Response_hasBody
/** Returns true if a response body was sent. */
public boolean hasBody() {
    return body != null;
}
3.68
hudi_AppendWriteFunction_initWriterHelper
// -------------------------------------------------------------------------
//  Utilities
// -------------------------------------------------------------------------

private void initWriterHelper() {
    final String instant = instantToWrite(true);
    if (instant == null) {
        // in case there are empty checkpoints that have no input data
        throw new HoodieException("No inflight instant when flushing data!");
    }
    this.writerHelper = new BulkInsertWriterHelper(this.config, this.writeClient.getHoodieTable(),
        this.writeClient.getConfig(), instant, this.taskID,
        getRuntimeContext().getNumberOfParallelSubtasks(), getRuntimeContext().getAttemptNumber(),
        this.rowType, false, Option.of(writeMetrics));
}
3.68
querydsl_TimeExpression_minute
/**
 * Create a minutes expression (range 0-59)
 *
 * @return minute
 */
public NumberExpression<Integer> minute() {
    if (minutes == null) {
        minutes = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.MINUTE, mixin);
    }
    return minutes;
}
3.68
flink_RpcGatewayUtils_extractRpcTimeout
/**
 * Extracts the {@link RpcTimeout} annotated rpc timeout value from the list of given method
 * arguments. If no {@link RpcTimeout} annotated parameter could be found, then the default
 * timeout is returned.
 *
 * @param parameterAnnotations Parameter annotations
 * @param args Array of arguments
 * @param defaultTimeout Default timeout to return if no {@link RpcTimeout} annotated parameter
 *     has been found
 * @return Timeout extracted from the array of arguments or the default timeout
 */
public static Duration extractRpcTimeout(
        Annotation[][] parameterAnnotations, Object[] args, Duration defaultTimeout) {
    if (args != null) {
        Preconditions.checkArgument(parameterAnnotations.length == args.length);

        for (int i = 0; i < parameterAnnotations.length; i++) {
            if (isRpcTimeout(parameterAnnotations[i])) {
                if (args[i] instanceof Time) {
                    return TimeUtils.toDuration((Time) args[i]);
                } else if (args[i] instanceof Duration) {
                    return (Duration) args[i];
                } else {
                    throw new RuntimeException(
                            "The rpc timeout parameter must be of type "
                                    + Time.class.getName()
                                    + " or "
                                    + Duration.class.getName()
                                    + ". The type "
                                    + args[i].getClass().getName()
                                    + " is not supported.");
                }
            }
        }
    }

    return defaultTimeout;
}
3.68
flink_AbstractID_getBytes
/**
 * Gets the bytes underlying this ID.
 *
 * @return The bytes underlying this ID.
 */
public byte[] getBytes() {
    byte[] bytes = new byte[SIZE];
    longToByteArray(lowerPart, bytes, 0);
    longToByteArray(upperPart, bytes, SIZE_OF_LONG);
    return bytes;
}
3.68
hadoop_OBSFileSystem_innerGetFileStatus
/**
 * Inner implementation without retry for {@link #getFileStatus(Path)}.
 *
 * @param f the path we want information from
 * @return a FileStatus object
 * @throws IOException on IO failure
 */
@VisibleForTesting
OBSFileStatus innerGetFileStatus(final Path f) throws IOException {
    if (enablePosix) {
        return OBSPosixBucketUtils.innerFsGetObjectStatus(this, f);
    }
    return OBSObjectBucketUtils.innerGetObjectStatus(this, f);
}
3.68
hadoop_TimelineReaderUtils_split
/**
 * Split the passed string along the passed delimiter character while looking
 * for the escape char, so that the split parts are interpreted correctly. For
 * a delimiter or escape character to be interpreted as part of the string, it
 * has to be escaped by putting an escape character in front.
 * @param str string to be split.
 * @param delimiterChar delimiter used for splitting.
 * @param escapeChar delimiter and escape character will be escaped using this
 *          character.
 * @return a list of strings after split.
 * @throws IllegalArgumentException if string is not properly escaped.
 */
static List<String> split(final String str, final char delimiterChar, final char escapeChar)
    throws IllegalArgumentException {
    if (str == null) {
        return null;
    }
    int len = str.length();
    if (len == 0) {
        return Collections.emptyList();
    }
    List<String> list = new ArrayList<String>();
    // Keeps track of offset of the passed string.
    int offset = 0;
    // Indicates start offset from which characters will be copied from original
    // string to destination string. Resets when an escape or delimiter char is
    // encountered.
    int startOffset = 0;
    StringBuilder builder = new StringBuilder(len);
    // Iterate over the string till we reach the end.
    while (offset < len) {
        if (str.charAt(offset) == escapeChar) {
            // An escape character must be followed by a delimiter or escape char
            // but we have reached the end and have no further character to look at.
            if (offset + 1 >= len) {
                throw new IllegalArgumentException("Escape char not properly escaped.");
            }
            char nextChar = str.charAt(offset + 1);
            // Next character must be a delimiter or an escape char.
            if (nextChar != escapeChar && nextChar != delimiterChar) {
                throw new IllegalArgumentException(
                    "Escape char or delimiter char not properly escaped.");
            }
            // Copy contents from the offset where last escape or delimiter char was
            // encountered.
            if (startOffset < offset) {
                builder.append(str.substring(startOffset, offset));
            }
            builder.append(nextChar);
            offset += 2;
            // Reset the start offset as an escape char has been encountered.
            startOffset = offset;
            continue;
        } else if (str.charAt(offset) == delimiterChar) {
            // A delimiter has been encountered without an escape character.
            // String needs to be split here. Copy remaining chars and add the
            // string to list.
            builder.append(str.substring(startOffset, offset));
            list.add(builder.toString().trim());
            // Reset the start offset as a delimiter has been encountered.
            startOffset = ++offset;
            builder = new StringBuilder(len - offset);
            continue;
        }
        offset++;
    }
    // Copy rest of the characters.
    if (!str.isEmpty()) {
        builder.append(str.substring(startOffset));
    }
    // Add the last part of delimited string to list.
    list.add(builder.toString().trim());
    return list;
}
3.68
flink_MemorySegment_putInt
/**
 * Writes the given int value (32bit, 4 bytes) to the given position in the system's native byte
 * order. This method offers the best speed for integer writing and should be used unless a
 * specific byte order is required. In most cases, it suffices to know that the byte order in
 * which the value is written is the same as the one in which it is read (such as transient
 * storage in memory, or serialization for I/O and network), making this method the preferable
 * choice.
 *
 * @param index The position at which the value will be written.
 * @param value The int value to be written.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 4.
 */
public void putInt(int index, int value) {
    final long pos = address + index;
    if (index >= 0 && pos <= addressLimit - 4) {
        UNSAFE.putInt(heapMemory, pos, value);
    } else if (address > addressLimit) {
        throw new IllegalStateException("segment has been freed");
    } else {
        // index is in fact invalid
        throw new IndexOutOfBoundsException();
    }
}
3.68
hadoop_DiskBalancerDataNode_hashCode
/**
 * Returns a hash code value for the object. This method is supported for the
 * benefit of hash tables such as those provided by {@link HashMap}.
 */
@Override
public int hashCode() {
    return super.hashCode();
}
3.68
flink_ReflectionUtil_getFullTemplateType
/**
 * Extract the full type information from the given type.
 *
 * @param type to be analyzed
 * @return Full type information describing the given type
 */
public static FullTypeInfo getFullTemplateType(Type type) {
    if (type instanceof ParameterizedType) {
        ParameterizedType parameterizedType = (ParameterizedType) type;
        FullTypeInfo[] templateTypeInfos =
                new FullTypeInfo[parameterizedType.getActualTypeArguments().length];

        for (int i = 0; i < parameterizedType.getActualTypeArguments().length; i++) {
            templateTypeInfos[i] =
                    getFullTemplateType(parameterizedType.getActualTypeArguments()[i]);
        }

        return new FullTypeInfo((Class<?>) parameterizedType.getRawType(), templateTypeInfos);
    } else {
        return new FullTypeInfo((Class<?>) type, null);
    }
}
3.68
hadoop_CheckpointCommand_getSignature
/**
 * Checkpoint signature is used to ensure
 * that nodes are talking about the same checkpoint.
 */
public CheckpointSignature getSignature() {
    return cSig;
}
3.68
flink_BridgingSqlProcedure_of
/**
 * Creates an instance of a procedure.
 *
 * @param dataTypeFactory used for creating {@link DataType}
 * @param resolvedProcedure {@link Procedure} with context
 */
public static BridgingSqlProcedure of(
        DataTypeFactory dataTypeFactory, ContextResolvedProcedure resolvedProcedure) {
    final Procedure procedure = resolvedProcedure.getProcedure();
    final ProcedureDefinition procedureDefinition = new ProcedureDefinition(procedure);
    final TypeInference typeInference =
            TypeInferenceExtractor.forProcedure(dataTypeFactory, procedure.getClass());
    return new BridgingSqlProcedure(
            createName(resolvedProcedure),
            createSqlIdentifier(resolvedProcedure),
            createSqlReturnTypeInference(dataTypeFactory, procedureDefinition, typeInference),
            createSqlOperandTypeInference(dataTypeFactory, procedureDefinition, typeInference),
            createSqlOperandTypeChecker(dataTypeFactory, procedureDefinition, typeInference),
            SqlFunctionCategory.USER_DEFINED_PROCEDURE,
            resolvedProcedure);
}
3.68
framework_VTabsheetBase_clearTabKeys
/** For internal use only. May be removed or replaced in the future. */
public void clearTabKeys() {
    tabKeys.clear();
    disabledTabKeys.clear();
}
3.68
hbase_MasterProcedureScheduler_getTableQueue
// ============================================================================
//  Table Queue Lookup Helpers
// ============================================================================
private TableQueue getTableQueue(TableName tableName) {
    TableQueue node = AvlTree.get(tableMap, tableName, TABLE_QUEUE_KEY_COMPARATOR);
    if (node != null) return node;

    node = new TableQueue(tableName, MasterProcedureUtil.getTablePriority(tableName),
        locking.getTableLock(tableName),
        locking.getNamespaceLock(tableName.getNamespaceAsString()));
    tableMap = AvlTree.insert(tableMap, node);
    return node;
}
3.68
hbase_SnapshotManifest_writeDataManifest
/*
 * Write the SnapshotDataManifest file
 */
private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException {
    try (FSDataOutputStream stream =
        workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) {
        manifest.writeTo(stream);
    }
}
3.68
framework_WebBrowser_isSafari
/**
 * Tests whether the user is using Safari. Note that Chrome on iOS is not
 * detected as Safari but as Chrome although the underlying browser engine
 * is the same.
 *
 * @return true if the user is using Safari, false if the user is not using
 *         Safari or if no information on the browser is present
 */
public boolean isSafari() {
    if (browserDetails == null) {
        return false;
    }
    return browserDetails.isSafari();
}
3.68
hbase_SpaceQuotaSnapshot_getLimit
/**
 * Returns the limit, in bytes, of the target (e.g. table, namespace).
 */
@Override
public long getLimit() {
    return limit;
}
3.68
hadoop_LoggedTask_setUnknownAttribute
// the second input parameter is deliberately ignored
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
    if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
        alreadySeenAnySetterAttributes.add(attributeName);
        System.err.println("In LoggedJob, we saw the unknown attribute " + attributeName + ".");
    }
}
3.68
flink_DualInputOperator_addSecondInput
/**
 * Add to the second input the union of the given operators.
 *
 * @param input The operator(s) to be unioned with the second input.
 * @deprecated This method will be removed in future versions. Use the {@link Union} operator
 *     instead.
 */
@Deprecated
public void addSecondInput(Operator<IN2>... input) {
    this.input2 = Operator.createUnionCascade(this.input2, input);
}
3.68
flink_CopyOnWriteStateMap_addNewStateMapEntry
/** Creates and inserts a new {@link StateMapEntry}. */
private StateMapEntry<K, N, S> addNewStateMapEntry(
        StateMapEntry<K, N, S>[] table, K key, N namespace, int hash) {

    // small optimization that aims to avoid holding references on duplicate namespace objects
    if (namespace.equals(lastNamespace)) {
        namespace = lastNamespace;
    } else {
        lastNamespace = namespace;
    }

    int index = hash & (table.length - 1);
    StateMapEntry<K, N, S> newEntry =
            new StateMapEntry<>(
                    key, namespace, null, hash, table[index], stateMapVersion, stateMapVersion);
    table[index] = newEntry;

    if (table == primaryTable) {
        ++primaryTableSize;
    } else {
        ++incrementalRehashTableSize;
    }
    return newEntry;
}
3.68
hadoop_HttpReferrerAuditHeader_withPath1
/**
 * Set Path1 of operation.
 * @param value new value
 * @return the builder
 */
public Builder withPath1(final String value) {
    path1 = value;
    return this;
}
3.68
flink_AbstractStreamOperatorV2_getInternalTimerService
/**
 * Returns a {@link InternalTimerService} that can be used to query current processing time and
 * event time and to set timers. An operator can have several timer services, where each has its
 * own namespace serializer. Timer services are differentiated by the string key that is given
 * when requesting them, if you call this method with the same key multiple times you will get
 * the same timer service instance in subsequent requests.
 *
 * <p>Timers are always scoped to a key, the currently active key of a keyed stream operation.
 * When a timer fires, this key will also be set as the currently active key.
 *
 * <p>Each timer has attached metadata, the namespace. Different timer services can have a
 * different namespace type. If you don't need namespace differentiation you can use {@link
 * VoidNamespaceSerializer} as the namespace serializer.
 *
 * @param name The name of the requested timer service. If no service exists under the given
 *     name a new one will be created and returned.
 * @param namespaceSerializer {@code TypeSerializer} for the timer namespace.
 * @param triggerable The {@link Triggerable} that should be invoked when timers fire
 * @param <N> The type of the timer namespace.
 */
@VisibleForTesting
public <K, N> InternalTimerService<N> getInternalTimerService(
        String name, TypeSerializer<N> namespaceSerializer, Triggerable<K, N> triggerable) {
    if (timeServiceManager == null) {
        throw new RuntimeException("The timer service has not been initialized.");
    }

    @SuppressWarnings("unchecked")
    InternalTimeServiceManager<K> keyedTimeServiceHandler =
            (InternalTimeServiceManager<K>) timeServiceManager;
    KeyedStateBackend<K> keyedStateBackend = getKeyedStateBackend();
    checkState(keyedStateBackend != null, "Timers can only be used on keyed operators.");
    return keyedTimeServiceHandler.getInternalTimerService(
            name, keyedStateBackend.getKeySerializer(), namespaceSerializer, triggerable);
}
3.68
hudi_SerDeHelper_parseSchemas
/**
 * Convert a json string to history internalSchemas.
 * A TreeMap is used to hold the history internalSchemas.
 *
 * @param json a json string
 * @return a TreeMap
 */
public static TreeMap<Long, InternalSchema> parseSchemas(String json) {
    TreeMap<Long, InternalSchema> result = new TreeMap<>();
    try {
        JsonNode jsonNode = (new ObjectMapper(new JsonFactory())).readValue(json, JsonNode.class);
        if (!jsonNode.has(SCHEMAS)) {
            throw new IllegalArgumentException(
                String.format("cannot parse schemas from current json string, missing key name: %s", SCHEMAS));
        }
        JsonNode schemas = jsonNode.get(SCHEMAS);
        Iterator<JsonNode> iter = schemas.elements();
        while (iter.hasNext()) {
            JsonNode schema = iter.next();
            InternalSchema current = fromJson(schema);
            result.put(current.schemaId(), current);
        }
    } catch (IOException e) {
        throw new HoodieException(e);
    }
    return result;
}
3.68
flink_FileInputFormat_close
/** Closes the file input stream of the input format. */
@Override
public void close() throws IOException {
    if (this.stream != null) {
        // close input stream
        this.stream.close();
        stream = null;
    }
}
3.68
hudi_InputPathHandler_parseInputPaths
/**
 * Takes in the original InputPaths and classifies each of them into incremental, snapshot and
 * non-hoodie InputPaths. The logic is as follows:
 * 1. Check if an inputPath starts with the same basePath as any of the metadata basePaths we know
 *    1a. If yes, this belongs to a Hoodie table that we already know about. Simply classify this
 *        as incremental or snapshot - We can get the table name of this inputPath from the
 *        metadata. Then based on the list of incrementalTables, we can classify this inputPath.
 *    1b. If no, this could be a new Hoodie Table we haven't seen yet or a non-Hoodie Input Path.
 *        Try creating the HoodieTableMetadataClient.
 *        - If it succeeds, further classify as incremental or snapshot as described in step
 *          1a above.
 *        - If DatasetNotFoundException/InvalidDatasetException is caught, this is a
 *          non-Hoodie inputPath
 * @param inputPaths - InputPaths from the original jobConf that was passed to HoodieInputFormat
 * @param incrementalTables - List of all incremental tables extracted from the config
 *          `hoodie.&lt;table-name&gt;.consume.mode=INCREMENTAL`
 * @throws IOException
 */
private void parseInputPaths(Path[] inputPaths, List<String> incrementalTables)
    throws IOException {
    for (Path inputPath : inputPaths) {
        boolean basePathKnown = false;
        for (HoodieTableMetaClient metaClient : tableMetaClientMap.values()) {
            if (inputPath.toString().contains(metaClient.getBasePath())) {
                // We already know the base path for this inputPath.
                basePathKnown = true;
                // Check if this is for a snapshot query
                tagAsIncrementalOrSnapshot(inputPath, metaClient, incrementalTables);
                break;
            }
        }
        if (!basePathKnown) {
            // This path is for a table that we don't know about yet.
            HoodieTableMetaClient metaClient;
            try {
                metaClient = getTableMetaClientForBasePathUnchecked(conf, inputPath);
                tableMetaClientMap.put(getIncrementalTable(metaClient), metaClient);
                tagAsIncrementalOrSnapshot(inputPath, metaClient, incrementalTables);
            } catch (TableNotFoundException | InvalidTableException e) {
                // This is a non Hoodie inputPath
                LOG.info("Handling a non-hoodie path " + inputPath);
                nonHoodieInputPaths.add(inputPath);
            }
        }
    }
}
3.68
flink_FailureResult_canRestart
/**
 * Creates a FailureResult which allows to restart the job.
 *
 * @param failureCause failureCause for restarting the job
 * @param backoffTime backoffTime to wait before restarting the job
 * @return FailureResult which allows to restart the job
 */
static FailureResult canRestart(Throwable failureCause, Duration backoffTime) {
    return new FailureResult(failureCause, backoffTime);
}
3.68
flink_TSetClientInfoResp_isSetStatus
/** Returns true if field status is set (has been assigned a value) and false otherwise. */
public boolean isSetStatus() {
    return this.status != null;
}
3.68
hadoop_GlobPattern_compile
/**
 * Compile a glob pattern string.
 * @param globPattern the glob pattern
 * @return the pattern object
 */
public static Pattern compile(String globPattern) {
    return new GlobPattern(globPattern).compiled();
}
3.68
hadoop_HttpReferrerAuditHeader_withContextId
/**
 * Set context ID.
 * @param value context
 * @return the builder
 */
public Builder withContextId(final String value) {
    contextId = value;
    return this;
}
3.68
morf_SqlDialect_convertStatementToSQL
/**
 * Converts a structured {@link SelectFirstStatement} to the equivalent SQL
 * text.
 *
 * @param statement the statement to convert
 * @return a string containing the SQL to run against the database
 */
public String convertStatementToSQL(SelectFirstStatement statement) {
    if (statement == null) {
        throw new IllegalArgumentException(CANNOT_CONVERT_NULL_STATEMENT_TO_SQL);
    }
    if (statement.getOrderBys().isEmpty()) {
        throw new IllegalArgumentException("Invalid select first statement - missing order by clause");
    }
    return getSqlFrom(statement);
}
3.68
dubbo_ConcurrentHashMapUtils_computeIfAbsent
/**
 * A temporary workaround for a Java 8 ConcurrentHashMap#computeIfAbsent specific performance issue: JDK-8161372.
 * @see <a href="https://bugs.openjdk.java.net/browse/JDK-8161372">https://bugs.openjdk.java.net/browse/JDK-8161372</a>
 */
public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Function<? super K, ? extends V> func) {
    Objects.requireNonNull(func);
    if (JRE.JAVA_8.isCurrentVersion()) {
        V v = map.get(key);
        if (null == v) {
            // issue#11986 lock bug
            // v = map.computeIfAbsent(key, func);

            // this bug-fix approach may cause `func.apply` to be called multiple times.
            v = func.apply(key);
            if (null == v) {
                return null;
            }
            final V res = map.putIfAbsent(key, v);
            if (null != res) {
                // if a previous value is present, another thread has already put a value and
                // putIfAbsent had no effect; return the existing value
                return res;
            }
            // if the previous value is null, putIfAbsent took effect; return the current value
        }
        return v;
    } else {
        return map.computeIfAbsent(key, func);
    }
}
3.68
rocketmq-connect_ExtendKeyValue_getList
/**
 * Get a list for the given key, with each element cast to the given class.
 *
 * @param s the key to look up
 * @param clazz the class to cast each element to
 * @param <T> the element type
 * @return the cast list
 */
public <T> List<T> getList(String s, Class<T> clazz) {
    List configs = getList(s);
    List<T> castConfigs = new ArrayList<>();
    configs.forEach(config -> {
        castConfigs.add(clazz.cast(config));
    });
    return castConfigs;
}
3.68
hadoop_BufferData_setReady
/**
 * Marks the completion of reading data into the buffer.
 * The buffer cannot be modified once in this state.
 *
 * @param expectedCurrentState the collection of states from which transition to READY is allowed.
 */
public synchronized void setReady(State... expectedCurrentState) {
    if (this.checksum != 0) {
        throw new IllegalStateException("Checksum cannot be changed once set");
    }
    this.buffer = this.buffer.asReadOnlyBuffer();
    this.checksum = getChecksum(this.buffer);
    this.buffer.rewind();
    this.updateState(State.READY, expectedCurrentState);
}
3.68
flink_SortUtil_putDoubleNormalizedKey
/** See http://stereopsis.com/radix.html for more details. */
public static void putDoubleNormalizedKey(
        double value, MemorySegment target, int offset, int numBytes) {
    long lValue = Double.doubleToLongBits(value);
    lValue ^= ((lValue >> (Long.SIZE - 1)) | Long.MIN_VALUE);
    NormalizedKeyUtil.putUnsignedLongNormalizedKey(lValue, target, offset, numBytes);
}
3.68
hudi_Base64CodecUtil_decode
/**
 * Decodes the input string using the Base64 encoding scheme.
 *
 * @param encodedString - Base64 encoded string to decode
 * @return A newly-allocated byte array containing the decoded bytes.
 */
public static byte[] decode(String encodedString) {
    return Base64.getDecoder().decode(getUTF8Bytes(encodedString));
}
3.68
hbase_WALCoprocessorHost_postWALRoll
/**
 * Called after rolling the current WAL.
 * @param oldPath the path of the wal that we replaced
 * @param newPath the path of the wal we have created and now is the current
 */
public void postWALRoll(Path oldPath, Path newPath) throws IOException {
    execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
        @Override
        protected void call(WALObserver observer) throws IOException {
            observer.postWALRoll(this, oldPath, newPath);
        }
    });
}
3.68
framework_Table_getDragMode
/**
 * @return the current start mode of the Table. Drag start mode controls how
 *         Table behaves as a drag source.
 */
public TableDragMode getDragMode() {
    return dragMode;
}
3.68
framework_FilesystemContainer_removeContainerProperty
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.data.Container#removeContainerProperty(java.lang.Object)
 */
@Override
public boolean removeContainerProperty(Object propertyId)
        throws UnsupportedOperationException {
    throw new UnsupportedOperationException(
            "File system container does not support this operation");
}
3.68
rocketmq-connect_LRUCache_get
/**
 * @param key the key whose associated value is to be returned
 * @return the value associated with the key, or null if there is no mapping
 */
@Override
public V get(K key) {
    return cache.get(key);
}
3.68