Dataset columns:
    name          string (lengths 12 to 178)
    code_snippet  string (lengths 8 to 36.5k)
    score         float64 (values 3.26 to 3.68)
querydsl_Expressions_dslPath
/**
 * Create a new Path expression.
 *
 * @param type type of expression
 * @param metadata path metadata
 * @param <T> type of expression
 * @return path expression
 */
public static <T> DslPath<T> dslPath(Class<? extends T> type, PathMetadata metadata) {
    return new DslPath<T>(type, metadata);
}
3.68
hadoop_BlockGrouper_anyRecoverable
/**
 * Given a BlockGroup, tell if any of the missing blocks can be recovered;
 * to be called by ECManager.
 *
 * @param blockGroup a blockGroup that may contain erased blocks, not yet known
 *                   to be recoverable or not
 * @return true if any erased block is recoverable, false otherwise
 */
public boolean anyRecoverable(ECBlockGroup blockGroup) {
    int erasedCount = blockGroup.getErasedCount();
    return erasedCount > 0 && erasedCount <= getRequiredNumParityBlocks();
}
3.68
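A hedged illustration of the recoverability rule above: for an RS(k, m) erasure code, any combination of up to m erased blocks is repairable. The class and values below are illustrative only, not Hadoop API:

    public class ErasureRecoveryDemo {
        // For an RS(k, m) code, up to m erased blocks can be reconstructed.
        static boolean anyRecoverable(int erasedCount, int parityBlocks) {
            return erasedCount > 0 && erasedCount <= parityBlocks;
        }

        public static void main(String[] args) {
            int parity = 3; // assume RS(6, 3): 6 data + 3 parity blocks
            System.out.println(anyRecoverable(2, parity)); // true
            System.out.println(anyRecoverable(4, parity)); // false: too many erasures
        }
    }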
hadoop_LoggingAuditor_deactivate
/**
 * Log at TRACE.
 */
@Override
public void deactivate() {
    LOG.trace("[{}] {} Deactivate {}",
        currentThreadID(), getSpanId(), getDescription());
}
3.68
hadoop_SimpleExponentialSmoothing_isDataStagnated
/**
 * Checks if the task is hanging.
 *
 * @param timeStamp current time of the scan.
 * @return true if we have a number of samples {@literal >} kMinimumReads and the
 *         record timestamp has expired.
 */
public boolean isDataStagnated(final long timeStamp) {
    ForecastRecord rec = forecastRefEntry.get();
    if (rec != null && rec.myIndex > kMinimumReads) {
        return (rec.timeStamp + kStagnatedWindow) > timeStamp;
    }
    return false;
}
3.68
hudi_HoodieTimeline_isInClosedOpenRange
/**
 * Return true if the specified timestamp is in the range [startTs, endTs).
 */
static boolean isInClosedOpenRange(String timestamp, String startTs, String endTs) {
    return HoodieTimeline.compareTimestamps(timestamp, GREATER_THAN_OR_EQUALS, startTs)
        && HoodieTimeline.compareTimestamps(timestamp, LESSER_THAN, endTs);
}
3.68
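A minimal standalone sketch of the same closed-open semantics, using plain lexicographic comparison in place of HoodieTimeline's comparators (an assumption that holds for fixed-width timestamp strings):

    public class ClosedOpenRangeDemo {
        // [startTs, endTs): the lower bound is included, the upper bound is not.
        static boolean isInClosedOpenRange(String ts, String startTs, String endTs) {
            return ts.compareTo(startTs) >= 0 && ts.compareTo(endTs) < 0;
        }

        public static void main(String[] args) {
            System.out.println(isInClosedOpenRange("20230101", "20230101", "20230201")); // true
            System.out.println(isInClosedOpenRange("20230201", "20230101", "20230201")); // false
        }
    }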
pulsar_FunctionRuntimeManager_removeAssignments
/**
 * Removes a collection of assignments.
 *
 * @param assignments assignments to remove
 */
public synchronized void removeAssignments(Collection<Assignment> assignments) {
    for (Assignment assignment : assignments) {
        this.deleteAssignment(assignment);
    }
}
3.68
morf_AliasedField_assetImmutableDslDisabled
/**
 * TODO remove when we remove the old mutable behaviour.
 */
public static void assetImmutableDslDisabled() {
    if (immutableDslEnabled()) {
        throw new UnsupportedOperationException(
            "Cannot modify a statement when immutability is configured.");
    }
}
3.68
hadoop_ManifestCommitterSupport_createManifestOutcome
/**
 * Create success/outcome data.
 *
 * @param stageConfig configuration.
 * @param stage stage name to record in the diagnostics.
 * @return a _SUCCESS object with some diagnostics.
 */
public static ManifestSuccessData createManifestOutcome(
        StageConfig stageConfig, String stage) {
    final ManifestSuccessData outcome = new ManifestSuccessData();
    outcome.setJobId(stageConfig.getJobId());
    outcome.setJobIdSource(stageConfig.getJobIdSource());
    outcome.setCommitter(MANIFEST_COMMITTER_CLASSNAME);
    // real timestamp
    outcome.setTimestamp(System.currentTimeMillis());
    final ZonedDateTime now = ZonedDateTime.now();
    outcome.setDate(now.toString());
    outcome.setHostname(NetUtils.getLocalHostname());
    // add some extra diagnostics which can still be parsed by older
    // builds of test applications.
    // Audit Span information can go in here too, in future.
    try {
        outcome.putDiagnostic(PRINCIPAL,
            UserGroupInformation.getCurrentUser().getShortUserName());
    } catch (IOException ignored) {
        // don't know who we are? exclude from the diagnostics.
    }
    outcome.putDiagnostic(STAGE, stage);
    return outcome;
}
3.68
flink_MemorySegment_getFloatBigEndian
/**
 * Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in
 * big endian byte order. This method's speed depends on the system's native byte order, and it
 * is possibly slower than {@link #getFloat(int)}. For most cases (such as transient storage in
 * memory or serialization for I/O and network), it suffices to know that the byte order in
 * which the value is written is the same as the one in which it is read, and {@link
 * #getFloat(int)} is the preferable choice.
 *
 * @param index The position from which the value will be read.
 * @return The float value at the given position.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 4.
 */
public float getFloatBigEndian(int index) {
    return Float.intBitsToFloat(getIntBigEndian(index));
}
3.68
zilla_HpackContext_staticIndex10
// Index in static table for the given name of length 10
private static int staticIndex10(DirectBuffer name) {
    switch (name.getByte(9)) {
        case 'e':
            if (STATIC_TABLE[55].name.equals(name)) { // set-cookie
                return 55;
            }
            break;
        case 't':
            if (STATIC_TABLE[58].name.equals(name)) { // user-agent
                return 58;
            }
            break;
        case 'y':
            if (STATIC_TABLE[1].name.equals(name)) { // :authority
                return 1;
            }
            break;
    }
    return -1;
}
3.68
dubbo_DubboComponentScanRegistrar_registerServiceAnnotationPostProcessor
/**
 * Registers {@link ServiceAnnotationPostProcessor}.
 *
 * @param packagesToScan packages to scan without resolving placeholders
 * @param registry {@link BeanDefinitionRegistry}
 * @since 2.5.8
 */
private void registerServiceAnnotationPostProcessor(Set<String> packagesToScan,
        BeanDefinitionRegistry registry) {
    BeanDefinitionBuilder builder = rootBeanDefinition(ServiceAnnotationPostProcessor.class);
    builder.addConstructorArgValue(packagesToScan);
    builder.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
    AbstractBeanDefinition beanDefinition = builder.getBeanDefinition();
    BeanDefinitionReaderUtils.registerWithGeneratedName(beanDefinition, registry);
}
3.68
hbase_MasterSnapshotVerifier_verifyRegionInfo
/**
 * Verify that the regionInfo is valid.
 *
 * @param region the region to check
 * @param manifest snapshot manifest to inspect
 */
private void verifyRegionInfo(final RegionInfo region, final SnapshotRegionManifest manifest)
        throws IOException {
    RegionInfo manifestRegionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo());
    if (RegionInfo.COMPARATOR.compare(region, manifestRegionInfo) != 0) {
        String msg = "Manifest region info " + manifestRegionInfo
            + " doesn't match expected region: " + region;
        throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
    }
}
3.68
hbase_HFileBlock_readAtOffset
/**
 * Does a positional read or a seek and read into the given byte buffer. We need to take care
 * to call {@link ByteBuff#release()} on every exit path to deallocate the ByteBuffers,
 * otherwise a memory leak may happen.
 *
 * @param dest destination buffer
 * @param size size of read
 * @param peekIntoNextBlock whether to read the next block's on-disk size
 * @param fileOffset position in the stream to read at
 * @param pread whether we should do a positional read
 * @param istream The input source of data
 * @return true to indicate the destination buffer includes the next block header, otherwise
 *         only the current block data without the next block header.
 * @throws IOException if any IO error happens.
 */
protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int size,
        boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException {
    if (!pread) {
        // Seek + read. Better for scanning.
        istream.seek(fileOffset);
        long realOffset = istream.getPos();
        if (realOffset != fileOffset) {
            throw new IOException("Tried to seek to " + fileOffset + " to read " + size
                + " bytes, but pos=" + realOffset + " after seek");
        }
        if (!peekIntoNextBlock) {
            BlockIOUtils.readFully(dest, istream, size);
            return false;
        }
        // Try to read the next block header
        if (!BlockIOUtils.readWithExtra(dest, istream, size, hdrSize)) {
            // did not read the next block header.
            return false;
        }
    } else {
        // Positional read. Better for random reads; or when the streamLock is already locked.
        int extraSize = peekIntoNextBlock ? hdrSize : 0;
        if (!BlockIOUtils.preadWithExtra(dest, istream, fileOffset, size, extraSize,
                isPreadAllBytes)) {
            // did not read the next block header.
            return false;
        }
    }
    assert peekIntoNextBlock;
    return true;
}
3.68
hadoop_OBSBlockOutputStream_close
/**
 * Close the stream.
 *
 * <p>This will not return until the upload is complete or the attempt to
 * perform the upload has failed. Exceptions raised in this method are
 * indicative that the write has failed and data is at risk of being lost.
 *
 * @throws IOException on any failure.
 */
@Override
public synchronized void close() throws IOException {
    if (closed.getAndSet(true)) {
        // already closed
        LOG.debug("Ignoring close() as stream is already closed");
        return;
    }
    if (hasException.get()) {
        String closeWarning = String.format(
            "closed has error. bs : pre write obs[%s] has error.", key);
        LOG.warn(closeWarning);
        throw new IOException(closeWarning);
    }
    // do upload
    completeCurrentBlock();
    // clear
    clearHFlushOrSync();
    // All end of write operations, including deleting fake parent
    // directories
    writeOperationHelper.writeSuccessful(key);
}
3.68
hadoop_HdfsLocatedFileStatus_getErasureCodingPolicy
/**
 * Get the erasure coding policy if it's set.
 *
 * @return the erasure coding policy
 */
@Override
public ErasureCodingPolicy getErasureCodingPolicy() {
    return ecPolicy;
}
3.68
flink_KeyGroupsStateHandle_getOffsetForKeyGroup
/**
 * @param keyGroupId the id of a key-group. the id must be contained in the range of this
 *     handle.
 * @return offset to the position of data for the provided key-group in the stream referenced
 *     by this state handle
 */
public long getOffsetForKeyGroup(int keyGroupId) {
    return groupRangeOffsets.getKeyGroupOffset(keyGroupId);
}
3.68
hbase_TableDescriptorBuilder_removeColumnFamily
/**
 * Removes the ColumnFamilyDescriptor with the name specified by the column parameter from the
 * table descriptor.
 *
 * @param column Name of the column family to be removed.
 * @return the removed ColumnFamilyDescriptor, or null if no family with that name existed.
 */
public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
    return this.families.remove(column);
}
3.68
framework_AbstractEmbedded_getSource
/**
 * Get the object source resource.
 *
 * @return the source
 */
public Resource getSource() {
    return getResource(AbstractEmbeddedState.SOURCE_RESOURCE);
}
3.68
flink_JsonRowSerializationSchema_build
/**
 * Finalizes the configuration and checks validity.
 *
 * @return Configured {@link JsonRowSerializationSchema}
 */
public JsonRowSerializationSchema build() {
    checkArgument(typeInfo != null, "typeInfo should be set.");
    return new JsonRowSerializationSchema(typeInfo);
}
3.68
morf_SchemaModificationAdapter_open
/**
 * @see org.alfasoftware.morf.dataset.DataSetAdapter#open()
 */
@Override
public void open() {
    super.open();
    schemaResource = databaseDataSetConsumer.connectionResources
        .openSchemaResource(databaseDataSetConsumer.getDataSource());
    try {
        connection = databaseDataSetConsumer.getDataSource().getConnection();
    } catch (SQLException e) {
        throw new RuntimeSqlException("Error opening connection", e);
    }
    // get a list of all the tables at the start
    for (Table table : schemaResource.tables()) {
        remainingTables.add(table.getName().toUpperCase());
        table.indexes().forEach(
            index -> existingIndexNamesAndTables.put(index.getName().toUpperCase(), table));
    }
}
3.68
dubbo_ClassUtils_forName
/**
 * Replacement for <code>Class.forName()</code> that also returns Class
 * instances for primitives (like "int") and array class names (like
 * "String[]").
 *
 * @param name the name of the Class
 * @param classLoader the class loader to use (may be <code>null</code>,
 *            which indicates the default class loader)
 * @return Class instance for the supplied name
 * @throws ClassNotFoundException if the class was not found
 * @throws LinkageError if the class file could not be loaded
 * @see Class#forName(String, boolean, ClassLoader)
 */
public static Class<?> forName(String name, ClassLoader classLoader)
        throws ClassNotFoundException, LinkageError {
    Class<?> clazz = resolvePrimitiveClassName(name);
    if (clazz != null) {
        return clazz;
    }
    // "java.lang.String[]" style arrays
    if (name.endsWith(ARRAY_SUFFIX)) {
        String elementClassName = name.substring(0, name.length() - ARRAY_SUFFIX.length());
        Class<?> elementClass = forName(elementClassName, classLoader);
        return Array.newInstance(elementClass, 0).getClass();
    }
    // "[Ljava.lang.String;" style arrays
    int internalArrayMarker = name.indexOf(INTERNAL_ARRAY_PREFIX);
    if (internalArrayMarker != -1 && name.endsWith(";")) {
        String elementClassName = null;
        if (internalArrayMarker == 0) {
            elementClassName = name.substring(INTERNAL_ARRAY_PREFIX.length(), name.length() - 1);
        } else if (name.startsWith("[")) {
            elementClassName = name.substring(1);
        }
        Class<?> elementClass = forName(elementClassName, classLoader);
        return Array.newInstance(elementClass, 0).getClass();
    }
    ClassLoader classLoaderToUse = classLoader;
    if (classLoaderToUse == null) {
        classLoaderToUse = getClassLoader();
    }
    return classLoaderToUse.loadClass(name);
}
3.68
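A hedged usage sketch of the three name styles this method accepts; it assumes the class lives at org.apache.dubbo.common.utils.ClassUtils and that a null class loader selects the default, as the Javadoc states:

    import org.apache.dubbo.common.utils.ClassUtils;

    public class ForNameDemo {
        public static void main(String[] args) throws ClassNotFoundException {
            // Primitive name, which plain java.lang.Class.forName cannot resolve.
            Class<?> intClass = ClassUtils.forName("int", null);
            // "java.lang.String[]" style array name.
            Class<?> arrayClass = ClassUtils.forName("java.lang.String[]", null);
            // "[Ljava.lang.String;" internal array name.
            Class<?> internalArray = ClassUtils.forName("[Ljava.lang.String;", null);
            System.out.println(intClass + " / " + arrayClass + " / " + internalArray);
        }
    }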
hibernate-validator_AbstractMethodOverrideCheck_isJavaLangObjectOrNull
/**
 * Determine if the provided {@link TypeElement} represents a {@link java.lang.Object} or is
 * {@code null}.
 *
 * @param typeElement the element to check
 * @return {@code true} if the provided element is {@link java.lang.Object} or is {@code null},
 *         {@code false} otherwise
 */
private boolean isJavaLangObjectOrNull(TypeElement typeElement) {
    return typeElement == null || JAVA_LANG_OBJECT.contentEquals( typeElement.getQualifiedName() );
}
3.68
framework_ContainerOrderedWrapper_removeItem
/**
 * Removes an Item specified by the itemId from the underlying container and
 * from the ordering.
 *
 * @param itemId
 *            the ID of the Item to be removed.
 * @return <code>true</code> if the operation succeeded, <code>false</code>
 *         if not
 * @throws UnsupportedOperationException
 *             if the removeItem is not supported.
 */
@Override
public boolean removeItem(Object itemId) throws UnsupportedOperationException {
    final boolean success = container.removeItem(itemId);
    if (!ordered && success) {
        removeFromOrderWrapper(itemId);
    }
    return success;
}
3.68
flink_MailboxProcessor_runMailboxStep
/**
 * Execute a single (as small as possible) step of the mailbox.
 *
 * @return true if something was processed.
 */
@VisibleForTesting
public boolean runMailboxStep() throws Exception {
    suspended = !mailboxLoopRunning;

    if (processMail(mailbox, true)) {
        return true;
    }
    if (isDefaultActionAvailable() && isNextLoopPossible()) {
        mailboxDefaultAction.runDefaultAction(new MailboxController(this));
        return true;
    }
    return false;
}
3.68
hadoop_Preconditions_getDefaultNullMSG
/* @VisibleForTesting */
static String getDefaultNullMSG() {
    return VALIDATE_IS_NOT_NULL_EX_MESSAGE;
}
3.68
hbase_StateMachineProcedure_setNextState
/**
 * Set the next state for the procedure.
 *
 * @param stateId the ordinal() of the state enum (or state id)
 */
private void setNextState(final int stateId) {
    if (states == null || states.length == stateCount) {
        int newCapacity = stateCount + 8;
        if (states != null) {
            states = Arrays.copyOf(states, newCapacity);
        } else {
            states = new int[newCapacity];
        }
    }
    states[stateCount++] = stateId;
}
3.68
MagicPlugin_EntityExtraData_isSplittable
// Here for slime-like mobs
public boolean isSplittable() {
    return true;
}
3.68
framework_VTabsheet_updateTabScroller
/**
 * Layouts the tab-scroller elements, and applies styles.
 */
private void updateTabScroller() {
    if (!isDynamicWidth()) {
        tabs.getStyle().setWidth(100, Unit.PCT);
    }

    // Make sure scrollerIndex is valid
    boolean changed = false;
    if (scrollerIndex < 0 || scrollerIndex > tb.getTabCount()) {
        scrollerIndex = tb.getFirstVisibleTab();
        changed = true;
    } else if (tb.getTabCount() > 0 && tb.getTab(scrollerIndex).isHiddenOnServer()) {
        scrollerIndex = tb.getNextVisibleTab(scrollerIndex);
        changed = true;
    }

    // This element is hidden by Valo, test with legacy themes.
    TableCellElement spacerCell = ((TableCellElement) tb.spacerTd.cast());
    if (scroller.getStyle().getDisplay() != "none") {
        spacerCell.getStyle().setPropertyPx("minWidth", scroller.getOffsetWidth());
        spacerCell.getStyle().setPropertyPx("minHeight", 1);
    } else {
        spacerCell.getStyle().setProperty("minWidth", "0");
        spacerCell.getStyle().setProperty("minHeight", "0");
    }

    // check if hidden tabs need to be scrolled back into view
    while (hasScrolledTabs()
            && (getLeftGap() + getRightGap() >= getFirstOutOfViewWidth())) {
        scrollerIndex = tb.scrollLeft(scrollerIndex);
        Tab currentFirst = tb.getTab(scrollerIndex);
        scrollerPositionTabId = currentFirst.id;
        // the styles might affect the next round of calculations, must
        // update on every round
        currentFirst.setStyleNames(scrollerIndex == activeTabIndex, true, true);
        currentFirst.recalculateCaptionWidth();
        // everything up to date, can remove the check
        changed = false;
    }

    if (changed) {
        Tab currentFirst = tb.getTab(scrollerIndex);
        currentFirst.setStyleNames(scrollerIndex == activeTabIndex, true, true);
        scrollerPositionTabId = currentFirst.id;
    }

    boolean scrolled = hasScrolledTabs();
    boolean clipped = hasClippedTabs();
    if (tb.getTabCount() > 0 && tb.isVisible() && (scrolled || clipped)) {
        scroller.getStyle().clearDisplay();
        scrollerPrev.setPropertyString("className", SCROLLER_CLASSNAME
                + (scrolled ? "Prev" : PREV_SCROLLER_DISABLED_CLASSNAME));
        scrollerNext.setPropertyString("className", SCROLLER_CLASSNAME
                + (clipped && scrollerIndex != tb.getLastVisibleTab() ? "Next"
                        : "Next-disabled"));

        // the active tab should be focusable if and only if it is visible
        boolean isActiveTabVisible = scrollerIndex <= activeTabIndex
                && !isClipped(tb.selected);
        tb.selected.setTabulatorIndex(isActiveTabVisible ? tabulatorIndex : -1);
    } else {
        scroller.getStyle().setDisplay(Display.NONE);
    }

    if (BrowserInfo.get().isSafariOrIOS()) {
        /*
         * another hack for webkits. tabscroller sometimes drops without
         * "shaking it" reproducable in
         * com.vaadin.tests.components.tabsheet.TabSheetIcons
         */
        final Style style = scroller.getStyle();
        style.setProperty("whiteSpace", "normal");
        Scheduler.get().scheduleDeferred(() -> style.setProperty("whiteSpace", ""));
    }
}
3.68
hudi_HoodieTimeline_getLogCompactionRequestedInstant
// Returns Log compaction requested instant
static HoodieInstant getLogCompactionRequestedInstant(final String timestamp) {
    return new HoodieInstant(State.REQUESTED, LOG_COMPACTION_ACTION, timestamp);
}
3.68
pulsar_AbstractSinkRecord_cumulativeAck
/**
 * Some sinks sometimes want to control the ack type.
 */
public void cumulativeAck() {
    if (sourceRecord instanceof PulsarRecord) {
        PulsarRecord pulsarRecord = (PulsarRecord) sourceRecord;
        pulsarRecord.cumulativeAck();
    } else {
        throw new RuntimeException("SourceRecord class type must be PulsarRecord");
    }
}
3.68
hbase_MasterObserver_postRequestLock
/**
 * Called after a new LockProcedure is queued.
 *
 * @param ctx the environment to interact with the framework and master
 */
default void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
        TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {
}
3.68
AreaShop_RentingRegionEvent_isExtending
/**
 * Check if the player is extending the region or renting it for the first time.
 *
 * @return true if the player is trying to extend the region, false if they are renting it for
 *         the first time
 */
public boolean isExtending() {
    return extending;
}
3.68
flink_CatalogCalciteSchema_getSubSchema
/**
 * Look up a sub-schema (database) by the given sub-schema name.
 *
 * @param schemaName name of sub-schema to look up
 * @return the sub-schema with a given database name, or null
 */
@Override
public Schema getSubSchema(String schemaName) {
    if (catalogManager.schemaExists(catalogName, schemaName)) {
        if (getSchemaVersion().isPresent()) {
            return new DatabaseCalciteSchema(
                            catalogName, schemaName, catalogManager, isStreamingMode)
                    .snapshot(getSchemaVersion().get());
        } else {
            return new DatabaseCalciteSchema(
                    catalogName, schemaName, catalogManager, isStreamingMode);
        }
    } else {
        return null;
    }
}
3.68
flink_AbstractBlobCache_getFileInternal
/**
 * Returns local copy of the file for the BLOB with the given key.
 *
 * <p>The method will first attempt to serve the BLOB from its local cache. If the BLOB is not
 * in the cache, the method will try to download it from this cache's BLOB server via a
 * distributed BLOB store (if available) or direct end-to-end download.
 *
 * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
 * @param blobKey The key of the desired BLOB.
 * @return file referring to the local storage location of the BLOB.
 * @throws IOException Thrown if an I/O error occurs while downloading the BLOBs from the BLOB
 *     server.
 */
protected File getFileInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
    checkArgument(blobKey != null, "BLOB key cannot be null.");

    final File localFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);
    readWriteLock.readLock().lock();
    try {
        if (localFile.exists()) {
            return localFile;
        }
    } finally {
        readWriteLock.readLock().unlock();
    }

    // first try the distributed blob store (if available)
    // use a temporary file (thread-safe without locking)
    File incomingFile = createTemporaryFilename();
    try {
        try {
            if (blobView.get(jobId, blobKey, incomingFile)) {
                // now move the temp file to our local cache atomically
                readWriteLock.writeLock().lock();
                try {
                    BlobUtils.moveTempFileToStore(
                            incomingFile, jobId, blobKey, localFile, log, null);
                } finally {
                    readWriteLock.writeLock().unlock();
                }
                return localFile;
            }
        } catch (Exception e) {
            log.info("Failed to copy from blob store. Downloading from BLOB server instead.", e);
        }

        final InetSocketAddress currentServerAddress = serverAddress;

        if (currentServerAddress != null) {
            // fallback: download from the BlobServer
            BlobClient.downloadFromBlobServer(
                    jobId,
                    blobKey,
                    incomingFile,
                    currentServerAddress,
                    blobClientConfig,
                    numFetchRetries);

            readWriteLock.writeLock().lock();
            try {
                BlobUtils.moveTempFileToStore(
                        incomingFile, jobId, blobKey, localFile, log, null);
            } finally {
                readWriteLock.writeLock().unlock();
            }
        } else {
            throw new IOException(
                    "Cannot download from BlobServer, because the server address is unknown.");
        }

        return localFile;
    } finally {
        // delete incomingFile from a failed download
        if (!incomingFile.delete() && incomingFile.exists()) {
            log.warn(
                    "Could not delete the staging file {} for blob key {} and job {}.",
                    incomingFile, blobKey, jobId);
        }
    }
}
3.68
hadoop_RpcProgramPortmap_getport
/**
 * Given a program number "prog", version number "vers", and transport
 * protocol number "prot", this procedure returns the port number on which the
 * program is awaiting call requests. A port value of zeros means the program
 * has not been registered. The "port" field of the argument is ignored.
 */
private XDR getport(int xid, XDR in, XDR out) {
    PortmapMapping mapping = PortmapRequest.mapping(in);
    String key = PortmapMapping.key(mapping);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Portmap GETPORT key=" + key + " " + mapping);
    }
    PortmapMapping value = map.get(key);
    int res = 0;
    if (value != null) {
        res = value.getPort();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Found mapping for key: " + key + " port:" + res);
        }
    } else {
        LOG.warn("Warning, no mapping for key: " + key);
    }
    return PortmapResponse.intReply(out, xid, res);
}
3.68
framework_ApplicationConnection_showError
/**
 * Shows an error notification.
 *
 * @param details
 *            Optional details.
 * @param message
 *            An ErrorMessage describing the error.
 */
protected void showError(String details, ErrorMessage message) {
    VNotification.showError(this, message.getCaption(), message.getMessage(),
            details, message.getUrl());
}
3.68
hudi_HoodieMetaSyncOperations_updateSerdeProperties
/**
 * Update the SerDe properties in metastore.
 *
 * @return true if properties updated.
 */
default boolean updateSerdeProperties(String tableName, Map<String, String> serdeProperties,
        boolean useRealtimeFormat) {
    return false;
}
3.68
hbase_FileLink_handleAccessLocationException
/**
 * Handle exceptions thrown when accessing the locations of a file link.
 *
 * @param fileLink the file link
 * @param newException the exception caught while accessing the current location
 * @param previousException the previous exception caught while accessing the other locations
 * @return AccessControlException if one was caught while accessing any of the locations,
 *         otherwise FileNotFoundException. The AccessControlException is thrown if the user
 *         scan snapshot feature is enabled, see
 *         {@link org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController}.
 * @throws IOException if the exception is neither AccessControlException nor
 *         FileNotFoundException
 */
private static IOException handleAccessLocationException(FileLink fileLink,
        IOException newException, IOException previousException) throws IOException {
    if (newException instanceof RemoteException) {
        newException = ((RemoteException) newException)
            .unwrapRemoteException(FileNotFoundException.class, AccessControlException.class);
    }
    if (newException instanceof FileNotFoundException) {
        // Try another file location
        if (previousException == null) {
            previousException = new FileNotFoundException(fileLink.toString());
        }
    } else if (newException instanceof AccessControlException) {
        // Try another file location
        previousException = newException;
    } else {
        throw newException;
    }
    return previousException;
}
3.68
framework_BrowserInfo_requiresTouchScrollDelegate
/**
 * Checks if the browser is capable of handling scrolling natively or if a
 * touch scroll helper is needed for scrolling.
 *
 * @return true if the browser needs a touch scroll helper, false if the
 *         browser can handle scrolling natively
 */
public boolean requiresTouchScrollDelegate() {
    if (!isTouchDevice()) {
        return false;
    }
    // TODO Should test other Android browsers, especially Chrome
    if (isAndroid() && isWebkit() && getWebkitVersion() >= 534) {
        return false;
    }
    // iOS 6+ Safari supports native scrolling; iOS 5 suffers from #8792
    // TODO Should test other iOS browsers
    if (isIOS() && isWebkit() && getOperatingSystemMajorVersion() >= 6) {
        return false;
    }
    if (isIE()) {
        return false;
    }
    return true;
}
3.68
rocketmq-connect_AvroDatumWriterFactory_getDatumWriter
/**
 * Get a datum writer for the given value.
 *
 * @param value the datum to be written
 * @param schema the Avro schema of the datum
 * @return a DatumWriter suited to the value's type and the configured options
 */
private DatumWriter<?> getDatumWriter(Object value, Schema schema) {
    if (value instanceof SpecificRecord) {
        return new SpecificDatumWriter<>(schema);
    } else if (useSchemaReflection) {
        return new ReflectDatumWriter<>(schema);
    } else {
        GenericData genericData = new GenericData();
        if (avroUseLogicalTypeConverters) {
            addLogicalTypeConversion(genericData);
        }
        return new GenericDatumWriter<>(schema, genericData);
    }
}
3.68
hbase_MobUtils_removeMobFiles
/**
 * Archives the mob files.
 *
 * @param conf The current configuration.
 * @param fs The current file system.
 * @param tableName The table name.
 * @param tableDir The table directory.
 * @param family The name of the column family.
 * @param storeFiles The files to be deleted.
 * @return true if the mob files were archived, false on failure.
 */
public static boolean removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
        Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) {
    try {
        HFileArchiver.archiveStoreFiles(conf, fs, getMobRegionInfo(tableName), tableDir, family,
            storeFiles);
        LOG.info("Table {} {} expired mob files are deleted", tableName, storeFiles.size());
        return true;
    } catch (IOException e) {
        LOG.error("Failed to delete the mob files, table {}", tableName, e);
    }
    return false;
}
3.68
framework_WindowWaiAriaRoles_setup
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
 * VaadinRequest)
 */
@Override
protected void setup(VaadinRequest request) {
    Button closeButton = new Button("Close windows");
    closeButton.addClickListener(event -> {
        while (!windows.isEmpty()) {
            Window window = windows.pop();
            removeWindow(window);
        }
    });

    Button regularButton = new Button("Regular");
    regularButton.addClickListener(event -> {
        Window regularWindow = new Window("Regular window");
        openWindow(regularWindow);
    });

    Button alertButton = new Button("Alert");
    alertButton.addClickListener(event -> {
        Window alertWindow = new Window("Alert window");
        alertWindow.setAssistiveRole(WindowRole.ALERTDIALOG);
        openWindow(alertWindow);
    });

    addComponent(closeButton);
    addComponent(regularButton);
    addComponent(alertButton);
}
3.68
morf_InsertStatementBuilder_getSelectStatement
/**
 * Gets the select statement which will generate the data for the insert.
 *
 * @return the select statement to use, or null if none is specified.
 */
SelectStatement getSelectStatement() {
    return selectStatement;
}
3.68
hbase_MetricsConnection_getRpcCounters
/** rpcCounters metric */
public ConcurrentMap<String, Counter> getRpcCounters() {
    return rpcCounters;
}
3.68
hbase_SyncTable_findNextKeyHashPair
/**
 * Attempt to read the next source key/hash pair. If there are no more, set nextSourceKey to
 * null.
 */
private void findNextKeyHashPair() throws IOException {
    boolean hasNext = sourceHashReader.next();
    if (hasNext) {
        nextSourceKey = sourceHashReader.getCurrentKey();
    } else {
        // no more keys - last hash goes to the end
        nextSourceKey = null;
    }
}
3.68
pulsar_FieldParser_stringToBoolean
/**
 * Converts String to Boolean.
 *
 * @param value
 *            The String to be converted.
 * @return The converted Boolean value.
 */
public static Boolean stringToBoolean(String value) {
    return Boolean.valueOf(value);
}
3.68
flink_TypeInference_typedArguments
/** @see #typedArguments(List) */
public Builder typedArguments(DataType... argumentTypes) {
    return typedArguments(Arrays.asList(argumentTypes));
}
3.68
dubbo_DefaultApplicationDeployer_useRegistryAsConfigCenterIfNecessary
/**
 * For compatibility purposes, use the registry as the default config center when
 * there's no config center specified explicitly and
 * useAsConfigCenter of registryConfig is null or true.
 */
private void useRegistryAsConfigCenterIfNecessary() {
    // we use the loading status of DynamicConfiguration to decide whether ConfigCenter
    // has been initiated.
    if (environment.getDynamicConfiguration().isPresent()) {
        return;
    }
    if (CollectionUtils.isNotEmpty(configManager.getConfigCenters())) {
        return;
    }
    // load registry
    configManager.loadConfigsOfTypeFromProps(RegistryConfig.class);
    List<RegistryConfig> defaultRegistries = configManager.getDefaultRegistries();
    if (defaultRegistries.size() > 0) {
        defaultRegistries.stream()
            .filter(this::isUsedRegistryAsConfigCenter)
            .map(this::registryAsConfigCenter)
            .forEach(configCenter -> {
                if (configManager.getConfigCenter(configCenter.getId()).isPresent()) {
                    return;
                }
                configManager.addConfigCenter(configCenter);
                logger.info("use registry as config-center: " + configCenter);
            });
    }
}
3.68
flink_RemoteInputChannel_getInflightBuffersUnsafe
/**
 * Returns a list of buffers, checking the first n non-priority buffers, and skipping all
 * events.
 */
private List<Buffer> getInflightBuffersUnsafe(long checkpointId) {
    assert Thread.holdsLock(receivedBuffers);

    checkState(checkpointId == lastBarrierId || lastBarrierId == NONE);

    final List<Buffer> inflightBuffers = new ArrayList<>();
    Iterator<SequenceBuffer> iterator = receivedBuffers.iterator();
    // skip all priority events (only buffers are stored anyways)
    Iterators.advance(iterator, receivedBuffers.getNumPriorityElements());

    while (iterator.hasNext()) {
        SequenceBuffer sequenceBuffer = iterator.next();
        if (sequenceBuffer.buffer.isBuffer()) {
            if (shouldBeSpilled(sequenceBuffer.sequenceNumber)) {
                inflightBuffers.add(sequenceBuffer.buffer.retainBuffer());
            } else {
                break;
            }
        }
    }

    return inflightBuffers;
}
3.68
hbase_MetricsConnection_incrConnectionCount
/** Increment the connection count of the metrics within a scope */
private void incrConnectionCount() {
    connectionCount.inc();
}
3.68
flink_BlobServer_putInputStream
/**
 * Uploads the data from the given input stream for the given job to the BLOB server.
 *
 * @param jobId the ID of the job the BLOB belongs to
 * @param inputStream the input stream to read the data from
 * @param blobType whether to make the data permanent or transient
 * @return the computed BLOB key identifying the BLOB on the server
 * @throws IOException thrown if an I/O error occurs while reading the data from the input
 *     stream, writing it to a local file, or uploading it to the HA store
 */
private BlobKey putInputStream(
        @Nullable JobID jobId, InputStream inputStream, BlobKey.BlobType blobType)
        throws IOException {

    if (LOG.isDebugEnabled()) {
        LOG.debug("Received PUT call for BLOB of job {}.", jobId);
    }

    File incomingFile = createTemporaryFilename();
    BlobKey blobKey = null;
    try {
        MessageDigest md = writeStreamToFileAndCreateDigest(inputStream, incomingFile);

        // persist file
        blobKey = moveTempFileToStore(incomingFile, jobId, md.digest(), blobType);

        return blobKey;
    } finally {
        // delete incomingFile from a failed download
        if (!incomingFile.delete() && incomingFile.exists()) {
            LOG.warn(
                    "Could not delete the staging file {} for blob key {} and job {}.",
                    incomingFile, blobKey, jobId);
        }
    }
}
3.68
hadoop_RLESparseResourceAllocation_getEarliestStartTime
/**
 * Get the timestamp of the earliest resource allocation.
 *
 * @return the timestamp of the first resource allocation
 */
public long getEarliestStartTime() {
    readLock.lock();
    try {
        if (cumulativeCapacity.isEmpty()) {
            return -1;
        } else {
            return cumulativeCapacity.firstKey();
        }
    } finally {
        readLock.unlock();
    }
}
3.68
hudi_ExecutorFactory_isBufferingRecords
/**
 * Checks whether the configured {@link HoodieExecutor} buffers records (for example, by
 * holding them in a queue).
 */
public static boolean isBufferingRecords(HoodieWriteConfig config) {
    ExecutorType executorType = config.getExecutorType();
    switch (executorType) {
        case BOUNDED_IN_MEMORY:
        case DISRUPTOR:
            return true;
        case SIMPLE:
            return false;
        default:
            throw new HoodieException("Unsupported Executor Type " + executorType);
    }
}
3.68
dubbo_RestProtocol_getContextPath
/**
 * getPath() will return: [contextpath + "/" +] path
 * 1. contextpath is empty if the user does not set it through ProtocolConfig or ProviderConfig
 * 2. path will never be empty; its default value is the interface name.
 *
 * @return the context path only if the user has explicitly given it a value.
 */
private String getContextPath(URL url) {
    String contextPath = url.getPath();
    if (contextPath != null) {
        if (contextPath.equalsIgnoreCase(url.getParameter(INTERFACE_KEY))) {
            return "";
        }
        if (contextPath.endsWith(url.getParameter(INTERFACE_KEY))) {
            contextPath = contextPath.substring(0,
                contextPath.lastIndexOf(url.getParameter(INTERFACE_KEY)));
        }
        return contextPath.endsWith(PATH_SEPARATOR)
            ? contextPath.substring(0, contextPath.length() - 1)
            : contextPath;
    } else {
        return "";
    }
}
3.68
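A standalone sketch of the path-stripping rule described above; it mimics the logic with plain strings rather than Dubbo's URL type, so the names here are illustrative:

    public class ContextPathDemo {
        // Strip a trailing interface name (and trailing '/') from a URL path.
        static String contextPath(String path, String interfaceName) {
            if (path == null || path.equalsIgnoreCase(interfaceName)) {
                return "";
            }
            if (path.endsWith(interfaceName)) {
                path = path.substring(0, path.lastIndexOf(interfaceName));
            }
            return path.endsWith("/") ? path.substring(0, path.length() - 1) : path;
        }

        public static void main(String[] args) {
            System.out.println(contextPath("rest/com.foo.BarService", "com.foo.BarService")); // "rest"
            System.out.println(contextPath("com.foo.BarService", "com.foo.BarService"));      // ""
        }
    }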
hadoop_KMSAuditLogger_setEndTime
/**
 * Set the time this audit event is finished.
 */
void setEndTime(long endTime) {
    this.endTime = endTime;
}
3.68
framework_Tree_removeActionHandler
/**
 * Removes an action handler.
 *
 * @see com.vaadin.event.Action.Container#removeActionHandler(Action.Handler)
 */
@Override
public void removeActionHandler(Action.Handler actionHandler) {
    if (actionHandlers != null && actionHandlers.contains(actionHandler)) {
        actionHandlers.remove(actionHandler);
        if (actionHandlers.isEmpty()) {
            actionHandlers = null;
            actionMapper = null;
        }
        markAsDirty();
    }
}
3.68
flink_InPlaceMutableHashTable_getCapacity
/**
 * Gets the total capacity of this hash table, in bytes.
 *
 * @return The hash table's total capacity.
 */
public long getCapacity() {
    return numAllMemorySegments * (long) segmentSize;
}
3.68
flink_DataSinkTask_getLogString
/**
 * Utility function that composes a string for logging purposes. The string includes the given
 * message and the index of the task in its task group together with the number of tasks in the
 * task group.
 *
 * @param message The main message for the log.
 * @return The string ready for logging.
 */
private String getLogString(String message) {
    return BatchTask.constructLogString(
            message, this.getEnvironment().getTaskInfo().getTaskName(), this);
}
3.68
framework_LegacyCommunicationManager_getConnector
/**
 * @deprecated As of 7.1. In 7.2 and later, use
 *             {@link ConnectorTracker#getConnector(String)
 *             uI.getConnectorTracker().getConnector(connectorId)} instead.
 *             See ticket #11411.
 */
@Deprecated
public ClientConnector getConnector(UI uI, String connectorId) {
    return uI.getConnectorTracker().getConnector(connectorId);
}
3.68
hbase_HRegionServer_closeRegionIgnoreErrors
/**
 * Try to close the region, logs a warning on failure but continues.
 *
 * @param region Region to close
 */
private void closeRegionIgnoreErrors(RegionInfo region, final boolean abort) {
    try {
        if (!closeRegion(region.getEncodedName(), abort, null)) {
            LOG.warn("Failed to close " + region.getRegionNameAsString()
                + " - ignoring and continuing");
        }
    } catch (IOException e) {
        LOG.warn("Failed to close " + region.getRegionNameAsString()
            + " - ignoring and continuing", e);
    }
}
3.68
hbase_LeaseManager_resetExpirationTime
/**
 * Resets the expiration time of the lease.
 */
public void resetExpirationTime() {
    this.expirationTime = EnvironmentEdgeManager.currentTime() + this.leaseTimeoutPeriod;
}
3.68
flink_FineGrainedSlotManager_freeSlot
/**
 * Free the given slot from the given allocation. If the slot is still allocated by the given
 * allocation id, then the slot will be freed.
 *
 * @param slotId identifying the slot to free, will be ignored
 * @param allocationId with which the slot is presumably allocated
 */
@Override
public void freeSlot(SlotID slotId, AllocationID allocationId) {
    checkInit();
    LOG.debug("Freeing slot {}.", allocationId);

    if (taskManagerTracker.getAllocatedOrPendingSlot(allocationId).isPresent()) {
        slotStatusSyncer.freeSlot(allocationId);
        checkResourceRequirementsWithDelay();
    } else {
        LOG.debug(
                "Trying to free a slot {} which has not been allocated. Ignoring this message.",
                allocationId);
    }
}
3.68
framework_AbstractBeanContainer_addNestedContainerProperty
/**
 * Adds a nested container property for the container, e.g.
 * "manager.address.street".
 *
 * All intermediate getters must exist and should return non-null values
 * when the property value is accessed. If an intermediate getter returns
 * null, a null value will be returned.
 *
 * @see NestedMethodProperty
 *
 * @param propertyId
 *            the property id of the nested property to add
 * @return true if the property was added
 */
public boolean addNestedContainerProperty(String propertyId) {
    return addContainerProperty(propertyId,
            new NestedPropertyDescriptor(propertyId, type));
}
3.68
hbase_LogRollRegionServerProcedureManager_stop
/**
 * Close <tt>this</tt> and all running backup procedure tasks.
 *
 * @param force forcefully stop all running tasks
 * @throws IOException exception
 */
@Override
public void stop(boolean force) throws IOException {
    if (!started) {
        return;
    }
    String mode = force ? "abruptly" : "gracefully";
    LOG.info("Stopping RegionServerBackupManager " + mode + ".");
    try {
        this.member.close();
    } finally {
        this.memberRpcs.close();
    }
}
3.68
hbase_CatalogJanitor_scan
/**
 * Run janitorial scan of catalog <code>hbase:meta</code> table looking for garbage to collect.
 *
 * @return How many items gc'd whether for merge or split. Returns -1 if a previous scan is in
 *         progress.
 */
public int scan() throws IOException {
    int gcs = 0;
    try {
        if (!alreadyRunning.compareAndSet(false, true)) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("CatalogJanitor already running");
            }
            // -1 indicates previous scan is in progress
            return -1;
        }
        this.lastReport = scanForReport();
        if (!this.lastReport.isEmpty()) {
            LOG.warn(this.lastReport.toString());
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug(this.lastReport.toString());
            }
        }

        updateAssignmentManagerMetrics();

        Map<RegionInfo, Result> mergedRegions = this.lastReport.mergedRegions;
        for (Map.Entry<RegionInfo, Result> e : mergedRegions.entrySet()) {
            if (this.services.isInMaintenanceMode()) {
                // Stop cleaning if the master is in maintenance mode
                LOG.debug("In maintenance mode, not cleaning");
                break;
            }
            List<RegionInfo> parents = CatalogFamilyFormat.getMergeRegions(e.getValue().rawCells());
            if (parents != null && cleanMergeRegion(this.services, e.getKey(), parents)) {
                gcs++;
            }
        }

        // Clean split parents
        Map<RegionInfo, Result> splitParents = this.lastReport.splitParents;

        // Now work on our list of found parents. See if any we can clean up.
        HashSet<String> parentNotCleaned = new HashSet<>();
        for (Map.Entry<RegionInfo, Result> e : splitParents.entrySet()) {
            if (this.services.isInMaintenanceMode()) {
                // Stop cleaning if the master is in maintenance mode
                if (LOG.isDebugEnabled()) {
                    LOG.debug("In maintenance mode, not cleaning");
                }
                break;
            }
            if (!parentNotCleaned.contains(e.getKey().getEncodedName())
                    && cleanParent(e.getKey(), e.getValue())) {
                gcs++;
            } else {
                // We could not clean the parent, so its daughters should not be
                // cleaned either (HBASE-6160)
                PairOfSameType<RegionInfo> daughters =
                    MetaTableAccessor.getDaughterRegions(e.getValue());
                parentNotCleaned.add(daughters.getFirst().getEncodedName());
                parentNotCleaned.add(daughters.getSecond().getEncodedName());
            }
        }
        return gcs;
    } finally {
        alreadyRunning.set(false);
    }
}
3.68
hbase_ProcedureStoreTracker_lookupClosestNode
/**
 * Lookup the node containing the specified procId.
 *
 * @param node cached node to check before doing a lookup
 * @param procId the procId to lookup
 * @return the node that may contain the procId, or null
 */
private BitSetNode lookupClosestNode(final BitSetNode node, final long procId) {
    if (node != null && node.contains(procId)) {
        return node;
    }
    final Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
    return entry != null ? entry.getValue() : null;
}
3.68
hadoop_ResourceBundles_getCounterGroupName
/**
 * Get the counter group display name.
 *
 * @param group the group name to lookup
 * @param defaultValue of the group
 * @return the group display name
 */
public static String getCounterGroupName(String group, String defaultValue) {
    return getValue(group, "CounterGroupName", "", defaultValue);
}
3.68
pulsar_NamespaceIsolationPolicies_getPolicyByName
/**
 * Access method to get the namespace isolation policy by the policy name.
 *
 * @param policyName the name of the policy to look up
 * @return the matching policy, or null if no policy exists with that name
 */
public NamespaceIsolationPolicy getPolicyByName(String policyName) {
    if (policies.get(policyName) == null) {
        return null;
    }
    return new NamespaceIsolationPolicyImpl(policies.get(policyName));
}
3.68
hadoop_TaskId_toString
/**
 * Print method for TaskId.
 *
 * @return the full TaskId, which is TaskId_prefix + jobId + "_" + taskId
 */
public final String toString() {
    return TASK_ID_PREFIX + jobId.getID() + "_" + taskId;
}
3.68
hadoop_OperationDuration_toString
/**
 * Return the duration as {@link #humanTime(long)}.
 *
 * @return a printable duration.
 */
@Override
public String toString() {
    return getDurationString();
}
3.68
flink_LastDatedValueFunction_accumulate
/**
 * Generic runtime function that will be called with different kinds of instances for {@code
 * input} depending on the actual call in the query.
 */
public void accumulate(Accumulator<T> acc, T input, LocalDate date) {
    if (input != null && (acc.date == null || date.isAfter(acc.date))) {
        acc.value = input;
        acc.date = date;
    }
}
3.68
hbase_MetaTableAccessor_getTargetServerName
/**
 * Returns the {@link ServerName} from the catalog table {@link Result} where the region is
 * transitioning. It should be the same as
 * {@link CatalogFamilyFormat#getServerName(Result,int)} if the server is at OPEN state.
 *
 * @param r Result to pull the transitioning server name from
 * @return A ServerName instance or {@link CatalogFamilyFormat#getServerName(Result,int)} if
 *         the necessary fields are not found or empty.
 */
@Nullable
public static ServerName getTargetServerName(final Result r, final int replicaId) {
    final Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        CatalogFamilyFormat.getServerNameColumn(replicaId));
    if (cell == null || cell.getValueLength() == 0) {
        RegionLocations locations = CatalogFamilyFormat.getRegionLocations(r);
        if (locations != null) {
            HRegionLocation location = locations.getRegionLocation(replicaId);
            if (location != null) {
                return location.getServerName();
            }
        }
        return null;
    }
    return ServerName.parseServerName(
        Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
}
3.68
flink_HyperLogLogPlusPlus_updateByHashcode
/** Update the HLL++ buffer. */
public void updateByHashcode(HllBuffer buffer, long hash) {
    // Determine the index of the register we are going to use.
    int idx = (int) (hash >>> idxShift);

    // Determine the number of leading zeros in the remaining bits 'w'.
    long pw = Long.numberOfLeadingZeros((hash << p) | wPadding) + 1L;

    // Get the word containing the register we are interested in.
    int wordOffset = idx / REGISTERS_PER_WORD;
    long word = buffer.array[wordOffset];

    // Extract the M[J] register value from the word.
    int shift = REGISTER_SIZE * (idx - (wordOffset * REGISTERS_PER_WORD));
    long mask = REGISTER_WORD_MASK << shift;
    long mIdx = (word & mask) >>> shift;

    // Assign the maximum number of leading zeros to the register.
    if (pw > mIdx) {
        buffer.array[wordOffset] = (word & ~mask) | (pw << shift);
    }
}
3.68
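A simplified, hedged walk-through of the two bit-field extractions above (register index from the top bits, leading-zero rank from the rest). The constants here (p = 14, idxShift = 64 - p) are assumptions for illustration, not values taken from Flink, and the wPadding term is omitted:

    public class HllBitsDemo {
        public static void main(String[] args) {
            final int p = 14;                 // assumed precision: 2^14 registers
            final int idxShift = Long.SIZE - p;
            long hash = 0xDEADBEEFCAFEBABEL;  // an arbitrary 64-bit hash
            // Top p bits select the register.
            int idx = (int) (hash >>> idxShift);
            // Leading zeros of the remaining bits, plus one, give the rank.
            long pw = Long.numberOfLeadingZeros(hash << p) + 1L;
            System.out.println("register=" + idx + " rank=" + pw);
        }
    }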
hadoop_TimelineEntityType_isChild
/**
 * Whether the input type can be a child of this entity.
 *
 * @param type entity type.
 * @return true if the passed entity type can be a child of this entity type, false
 *         otherwise.
 */
public boolean isChild(TimelineEntityType type) {
    switch (this) {
    case YARN_CLUSTER:
        return YARN_FLOW_RUN == type || YARN_APPLICATION == type;
    case YARN_FLOW_RUN:
        return YARN_FLOW_RUN == type || YARN_APPLICATION == type;
    case YARN_APPLICATION:
        return YARN_APPLICATION_ATTEMPT == type;
    case YARN_APPLICATION_ATTEMPT:
        return YARN_CONTAINER == type;
    case YARN_CONTAINER:
        return false;
    case YARN_QUEUE:
        return YARN_QUEUE == type;
    default:
        return false;
    }
}
3.68
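A usage sketch, assuming the enum is the public org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType:

    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;

    public class EntityHierarchyDemo {
        public static void main(String[] args) {
            // A flow run may sit under a cluster...
            System.out.println(TimelineEntityType.YARN_CLUSTER
                    .isChild(TimelineEntityType.YARN_FLOW_RUN));   // true
            // ...but a container is only a child of an application attempt.
            System.out.println(TimelineEntityType.YARN_APPLICATION
                    .isChild(TimelineEntityType.YARN_CONTAINER));  // false
        }
    }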
framework_VDebugWindow_resetTimer
/**
 * Resets the timer.
 *
 * @return Milliseconds elapsed since the timer was last reset.
 */
static int resetTimer() {
    int sinceLast = lastReset.elapsedMillis();
    lastReset = null;
    return sinceLast;
}
3.68
hbase_ReplicationSourceManager_getStats
/**
 * Get a string representation of all the sources' metrics.
 */
public String getStats() {
    StringBuilder stats = new StringBuilder();
    // Print stats that apply across all Replication Sources
    stats.append("Global stats: ");
    stats.append("WAL Edits Buffer Used=").append(getTotalBufferUsed()).append("B, Limit=")
        .append(getTotalBufferLimit()).append("B\n");
    for (ReplicationSourceInterface source : this.sources.values()) {
        stats.append("Normal source for cluster " + source.getPeerId() + ": ");
        stats.append(source.getStats() + "\n");
    }
    for (ReplicationSourceInterface oldSource : oldsources) {
        stats.append("Recovered source for cluster/machine(s) " + oldSource.getPeerId() + ": ");
        stats.append(oldSource.getStats() + "\n");
    }
    return stats.toString();
}
3.68
hbase_HRegion_waitForFlushesAndCompactions
/** Wait for all current flushes and compactions of the region to complete */
// TODO HBASE-18906. Check the usage (if any) in Phoenix and expose this or give alternate way
// for Phoenix needs.
public void waitForFlushesAndCompactions() {
    synchronized (writestate) {
        if (this.writestate.readOnly) {
            // we should not wait for replayed flushes if we are read only (for example in case
            // the region is a secondary replica).
            return;
        }
        boolean interrupted = false;
        try {
            while (writestate.compacting.get() > 0 || writestate.flushing) {
                LOG.debug("waiting for " + writestate.compacting + " compactions"
                    + (writestate.flushing ? " & cache flush" : "") + " to complete for region "
                    + this);
                try {
                    writestate.wait();
                } catch (InterruptedException iex) {
                    // essentially ignore and propagate the interrupt back up
                    LOG.warn("Interrupted while waiting in region {}", this);
                    interrupted = true;
                    break;
                }
            }
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    }
}
3.68
open-banking-gateway_ResultBody_getBody
/**
 * Body of the results - i.e. account list.
 */
@JsonIgnore
default Object getBody() {
    return null;
}
3.68
hadoop_BlockReconstructionWork_setNotEnoughRack
/**
 * Mark that the reconstruction work is to replicate internal block to a new
 * rack.
 */
void setNotEnoughRack() {
    notEnoughRack = true;
}
3.68
flink_HiveInspectors_toInspectors
/** Get an array of ObjectInspector from the given array of args and their types. */
public static ObjectInspector[] toInspectors(
        HiveShim hiveShim, Object[] args, DataType[] argTypes) {
    assert args.length == argTypes.length;
    ObjectInspector[] argumentInspectors = new ObjectInspector[argTypes.length];
    for (int i = 0; i < argTypes.length; i++) {
        Object constant = args[i];
        if (constant == null) {
            argumentInspectors[i] =
                    TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(
                            HiveTypeUtil.toHiveTypeInfo(argTypes[i], false));
        } else {
            PrimitiveTypeInfo primitiveTypeInfo =
                    (PrimitiveTypeInfo) HiveTypeUtil.toHiveTypeInfo(argTypes[i], false);
            constant =
                    getConversion(
                                    getObjectInspector(primitiveTypeInfo),
                                    argTypes[i].getLogicalType(),
                                    hiveShim)
                            .toHiveObject(constant);
            argumentInspectors[i] =
                    getObjectInspectorForPrimitiveConstant(primitiveTypeInfo, constant, hiveShim);
        }
    }
    return argumentInspectors;
}
3.68
hadoop_BlockBlobAppendStream_maybeThrowFirstError
/**
 * Throw the first error caught if it has not been raised already.
 *
 * @throws IOException if one is caught and needs to be thrown.
 */
private void maybeThrowFirstError() throws IOException {
    if (firstError.get() != null) {
        firstErrorThrown = true;
        throw firstError.get();
    }
}
3.68
hbase_ParseFilter_getFilterArguments
/**
 * Returns the arguments of the filter from the filter string.
 * <p>
 * @param filterStringAsByteArray filter string given by the user
 * @return an ArrayList containing the arguments of the filter in the filter string
 */
public static ArrayList<byte[]> getFilterArguments(byte[] filterStringAsByteArray) {
    int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0,
        filterStringAsByteArray.length, ParseConstants.LPAREN);
    if (argumentListStartIndex == -1) {
        throw new IllegalArgumentException("Incorrect argument list");
    }

    int argumentStartIndex = 0;
    int argumentEndIndex = 0;
    ArrayList<byte[]> filterArguments = new ArrayList<>();

    for (int i = argumentListStartIndex + 1; i < filterStringAsByteArray.length; i++) {
        if (filterStringAsByteArray[i] == ParseConstants.WHITESPACE
            || filterStringAsByteArray[i] == ParseConstants.COMMA
            || filterStringAsByteArray[i] == ParseConstants.RPAREN) {
            continue;
        }

        // The argument is in single quotes - for example 'prefix'
        if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE) {
            argumentStartIndex = i;
            for (int j = argumentStartIndex + 1; j < filterStringAsByteArray.length; j++) {
                if (filterStringAsByteArray[j] == ParseConstants.SINGLE_QUOTE) {
                    if (isQuoteUnescaped(filterStringAsByteArray, j)) {
                        argumentEndIndex = j;
                        i = j + 1;
                        byte[] filterArgument = createUnescapdArgument(filterStringAsByteArray,
                            argumentStartIndex, argumentEndIndex);
                        filterArguments.add(filterArgument);
                        break;
                    } else {
                        // To jump over the second escaped quote
                        j++;
                    }
                } else if (j == filterStringAsByteArray.length - 1) {
                    throw new IllegalArgumentException("Incorrect argument list");
                }
            }
        } else {
            // The argument is an integer, boolean, comparison operator like <, >, != etc
            argumentStartIndex = i;
            for (int j = argumentStartIndex; j < filterStringAsByteArray.length; j++) {
                if (filterStringAsByteArray[j] == ParseConstants.WHITESPACE
                    || filterStringAsByteArray[j] == ParseConstants.COMMA
                    || filterStringAsByteArray[j] == ParseConstants.RPAREN) {
                    argumentEndIndex = j - 1;
                    i = j;
                    byte[] filterArgument = new byte[argumentEndIndex - argumentStartIndex + 1];
                    Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, argumentStartIndex,
                        argumentEndIndex - argumentStartIndex + 1);
                    filterArguments.add(filterArgument);
                    break;
                } else if (j == filterStringAsByteArray.length - 1) {
                    throw new IllegalArgumentException("Incorrect argument list");
                }
            }
        }
    }
    return filterArguments;
}
3.68
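A hedged usage sketch; it assumes the class is org.apache.hadoop.hbase.filter.ParseFilter and that quoted arguments come back unescaped and unquoted, as the parsing loop above suggests:

    import java.util.ArrayList;
    import org.apache.hadoop.hbase.filter.ParseFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FilterArgsDemo {
        public static void main(String[] args) {
            byte[] filter = Bytes.toBytes("SingleColumnValueFilter('cf', 'q', >=, 'value')");
            ArrayList<byte[]> parsed = ParseFilter.getFilterArguments(filter);
            for (byte[] arg : parsed) {
                // expected: cf, q, >=, value
                System.out.println(Bytes.toString(arg));
            }
        }
    }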
framework_AbsoluteLayout_replaceComponent
/**
 * Replaces one component with another one. The new component inherits the
 * old component's position.
 */
@Override
public void replaceComponent(Component oldComponent, Component newComponent) {
    ComponentPosition position = getPosition(oldComponent);
    removeComponent(oldComponent);
    addComponent(newComponent, position);
}
3.68
hbase_Result_size
/** Returns the size of the underlying Cell [] */
public int size() {
    return this.cells == null ? 0 : this.cells.length;
}
3.68
hbase_BufferedMutatorOverAsyncBufferedMutator_getHostnameAndPort
// Does not always work, so may return an empty string
private String getHostnameAndPort(Throwable error) {
    Matcher matcher = ADDR_MSG_MATCHER.matcher(error.getMessage());
    if (matcher.matches()) {
        return matcher.group(1);
    } else {
        return "";
    }
}
3.68
framework_Button_getCustomAttributes
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.ui.AbstractComponent#getCustomAttributes()
 */
@Override
protected Collection<String> getCustomAttributes() {
    Collection<String> result = super.getCustomAttributes();
    result.add(DESIGN_ATTR_PLAIN_TEXT);
    result.add("caption");
    result.add("icon-alt");
    result.add("icon-alternate-text");
    result.add("click-shortcut");
    result.add("html-content-allowed");
    result.add("caption-as-html");
    return result;
}
3.68
flink_ResultPartitionType_isBounded
/**
 * Whether this partition uses a limited number of (network) buffers or not.
 *
 * @return <tt>true</tt> if the number of buffers should be bound to some limit
 */
public boolean isBounded() {
    return isBounded;
}
3.68
framework_Window_getAssistiveDescription
/**
 * Gets the components that are used as assistive description. Text
 * contained in these components will be read by assistive devices when the
 * window is opened.
 *
 * @return array of previously set components
 */
public Component[] getAssistiveDescription() {
    Connector[] contentDescription = getState(false).contentDescription;
    if (contentDescription == null) {
        return null;
    }

    Component[] target = new Component[contentDescription.length];
    System.arraycopy(contentDescription, 0, target, 0, contentDescription.length);

    return target;
}
3.68
hadoop_OBSFileSystem_getDefaultBlockSize
/**
 * Imitate HDFS to return the number of bytes that large input files should be
 * optimally split into to minimize I/O time. The given path will be used to
 * locate the actual filesystem. The full path does not have to exist.
 *
 * @param f path of file
 * @return the default block size for the path's filesystem
 */
@Override
public long getDefaultBlockSize(final Path f) {
    return blockSize;
}
3.68
pulsar_MultiTopicsConsumerImpl_topicNamesValid
// Check topics are valid.
// - each topic is valid,
// - topic names are unique.
private static boolean topicNamesValid(Collection<String> topics) {
    checkState(topics != null && topics.size() >= 1, "topics should contain more than 1 topic");

    Optional<String> result = topics.stream()
        .filter(topic -> !TopicName.isValid(topic))
        .findFirst();

    if (result.isPresent()) {
        log.warn("Received invalid topic name: {}", result.get());
        return false;
    }

    // check topic names are unique
    HashSet<String> set = new HashSet<>(topics);
    if (set.size() == topics.size()) {
        return true;
    } else {
        log.warn("Topic names not unique. unique/all : {}/{}", set.size(), topics.size());
        return false;
    }
}
3.68
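The uniqueness check above boils down to comparing a HashSet's size with the original collection's, since a set silently drops duplicates. A standalone sketch:

    import java.util.HashSet;
    import java.util.List;

    public class UniqueTopicsDemo {
        static boolean allUnique(List<String> topics) {
            // A set drops duplicates, so a size mismatch reveals a repeated name.
            return new HashSet<>(topics).size() == topics.size();
        }

        public static void main(String[] args) {
            System.out.println(allUnique(List.of("t1", "t2")));       // true
            System.out.println(allUnique(List.of("t1", "t1", "t2"))); // false
        }
    }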
hbase_RegionPlacementMaintainer_transform
/**
 * Copy a given matrix into a new matrix, transforming each row index and each column index
 * according to the randomization scheme that was created at construction time.
 *
 * @param matrix the cost matrix to transform
 * @return a new matrix with row and column indices transformed
 */
public float[][] transform(float[][] matrix) {
    float[][] result = new float[rows][cols];
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            result[rowTransform[i]][colTransform[j]] = matrix[i][j];
        }
    }
    return result;
}
3.68
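A self-contained sketch of the same row/column permutation, with explicit transform arrays standing in for the randomization scheme built at construction time:

    public class MatrixTransformDemo {
        public static void main(String[] args) {
            float[][] matrix = { {1f, 2f}, {3f, 4f} };
            int[] rowTransform = {1, 0};  // swap the two rows
            int[] colTransform = {1, 0};  // swap the two columns
            float[][] result = new float[2][2];
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    result[rowTransform[i]][colTransform[j]] = matrix[i][j];
                }
            }
            // result is { {4, 3}, {2, 1} }
            System.out.println(java.util.Arrays.deepToString(result));
        }
    }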
framework_AbstractDateFieldElement_setISOValue
/**
 * Sets the value of the date field as an ISO8601 compatible string
 * (yyyy-MM-dd or yyyy-MM-dd'T'HH:mm:ss depending on whether the element
 * supports time).
 *
 * @param isoDateValue
 *            the date in ISO-8601 format
 * @since 8.1.0
 */
protected void setISOValue(String isoDateValue) {
    getCommandExecutor().executeScript(
            "arguments[0].setISOValue(arguments[1]);", this, isoDateValue);
}
3.68
hbase_LocalHBaseCluster_getMaster
/** Returns the HMaster thread */
public HMaster getMaster(int serverNumber) {
    return masterThreads.get(serverNumber).getMaster();
}
3.68
framework_AbstractComponentConnector_registerTouchHandlers
/**
 * The new default behavior is for long taps to fire a contextclick event if
 * there's a contextclick listener attached to the component.
 *
 * If you do not want this in your component, override this with a blank
 * method to get rid of said behavior.
 *
 * Some Vaadin Components already handle the long tap as a context menu.
 * This method is unnecessary for those.
 *
 * @since 7.6
 */
protected void registerTouchHandlers() {
    Widget widget = getWidget();

    touchStartHandler = widget.addDomHandler(event -> {
        if (longTouchTimer != null && longTouchTimer.isRunning()) {
            return;
        }

        // Prevent selection for the element while pending long tap.
        WidgetUtil.setTextSelectionEnabled(widget.getElement(), false);

        if (BrowserInfo.get().isAndroid()) {
            // Android fires ContextMenu events automatically.
            return;
        }

        /*
         * we need to build mouseEventDetails eagerly - the event won't be
         * guaranteed to be around when the timer executes. At least this
         * was the case with iOS devices.
         */
        final MouseEventDetails mouseEventDetails = MouseEventDetailsBuilder
                .buildMouseEventDetails(event.getNativeEvent(), widget.getElement());

        final EventTarget eventTarget = event.getNativeEvent().getEventTarget();

        longTouchTimer = new Timer() {
            @Override
            public void run() {
                // we're handling this event, our parent components
                // don't need to bother with it anymore.
                cancelParentTouchTimers();
                // The default context click implementation only provides the
                // mouse coordinates relative to root element of widget.
                sendContextClickEvent(mouseEventDetails, eventTarget);
                preventNextTouchEnd = true;
            }
        };

        Touch touch = event.getChangedTouches().get(0);
        touchStartX = touch.getClientX();
        touchStartY = touch.getClientY();

        longTouchTimer.schedule(TOUCH_CONTEXT_MENU_TIMEOUT);
    }, TouchStartEvent.getType());

    touchMoveHandler = widget.addDomHandler(new TouchMoveHandler() {
        @Override
        public void onTouchMove(TouchMoveEvent event) {
            if (isSignificantMove(event)) {
                // Moved finger before the context menu timer
                // expired, so let the browser handle the event.
                cancelTouchTimer();
            }
        }

        // mostly copy-pasted code from VScrollTable
        // TODO refactor main logic to a common class
        private boolean isSignificantMove(TouchMoveEvent event) {
            if (longTouchTimer == null) {
                // no touch start
                return false;
            }

            // Calculate the distance between touch start and the current
            // touch position
            Touch touch = event.getChangedTouches().get(0);
            int deltaX = touch.getClientX() - touchStartX;
            int deltaY = touch.getClientY() - touchStartY;
            int delta = deltaX * deltaX + deltaY * deltaY;

            // Compare to the square of the significant move threshold to
            // remove the need for a square root
            if (delta > SIGNIFICANT_MOVE_THRESHOLD * SIGNIFICANT_MOVE_THRESHOLD) {
                return true;
            }
            return false;
        }
    }, TouchMoveEvent.getType());

    touchEndHandler = widget.addDomHandler(event -> {
        // cancel the timer so the event doesn't fire
        cancelTouchTimer();

        if (preventNextTouchEnd) {
            event.preventDefault();
            preventNextTouchEnd = false;
        }
    }, TouchEndEvent.getType());
}
3.68
framework_DefaultEditorEventHandler_handleOpenEvent
/**
 * Opens the editor on the appropriate row if the received event is an open
 * event. The default implementation uses
 * {@link #isOpenEvent(EditorDomEvent) isOpenEvent}.
 *
 * @param event
 *            the received event
 * @return true if this method handled the event and nothing else should be
 *         done, false otherwise
 */
protected boolean handleOpenEvent(EditorDomEvent<T> event) {
    if (isOpenEvent(event)) {
        final EventCellReference<T> cell = event.getCell();

        editRow(event, cell.getRowIndex(), cell.getColumnIndexDOM());

        event.getDomEvent().preventDefault();

        return true;
    }
    return false;
}
3.68
hadoop_ReencryptionHandler_submitCurrentBatch
/**
 * Submit the current batch to the thread pool.
 *
 * @param zoneId
 *          Id of the EZ INode
 * @throws IOException
 * @throws InterruptedException
 */
@Override
protected void submitCurrentBatch(final Long zoneId) throws IOException, InterruptedException {
    if (currentBatch.isEmpty()) {
        return;
    }
    ZoneSubmissionTracker zst;
    synchronized (ReencryptionHandler.this) {
        zst = submissions.get(zoneId);
        if (zst == null) {
            zst = new ZoneSubmissionTracker();
            submissions.put(zoneId, zst);
        }
        Future future = batchService.submit(
            new EDEKReencryptCallable(zoneId, currentBatch, reencryptionHandler));
        zst.addTask(future);
    }
    LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
        currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
    currentBatch = new ReencryptionBatch(reencryptBatchSize);
    // flip the pause flag if this is the nth submission.
    // The actual pause needs to happen outside of the lock.
    if (pauseAfterNthSubmission > 0) {
        if (--pauseAfterNthSubmission == 0) {
            shouldPauseForTesting = true;
        }
    }
}
3.68
hudi_BootstrapIndex_useIndex
/**
 * Returns true if valid metadata bootstrap is present.
 *
 * @return true if the bootstrap index should be used, false otherwise.
 */
public final boolean useIndex() {
    if (isPresent()) {
        boolean validInstantTime = metaClient.getActiveTimeline().getCommitsTimeline()
            .filterCompletedInstants().lastInstant()
            .map(i -> HoodieTimeline.compareTimestamps(i.getTimestamp(),
                HoodieTimeline.GREATER_THAN_OR_EQUALS,
                HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS))
            .orElse(false);
        return validInstantTime && metaClient.getTableConfig().getBootstrapBasePath().isPresent();
    } else {
        return false;
    }
}
3.68
hbase_SteppingSplitPolicy_getSizeToCheck
/**
 * @return flushSize * 2 if there's exactly one region of the table in question found on this
 *         regionserver. Otherwise max file size. This allows a table to spread quickly across
 *         servers, while avoiding creating too many regions.
 */
@Override
protected long getSizeToCheck(final int tableRegionsCount) {
    return tableRegionsCount == 1 ? this.initialSize : getDesiredMaxFileSize();
}
3.68
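The effect of the policy in numbers, as a standalone sketch; the flush and max file sizes below are illustrative choices, not values read from an HBase configuration:

    public class SteppingSplitDemo {
        public static void main(String[] args) {
            long flushSize = 128L * 1024 * 1024;          // illustrative flush size
            long maxFileSize = 10L * 1024 * 1024 * 1024;  // illustrative max file size
            long initialSize = 2 * flushSize;
            for (int regions : new int[] {1, 2, 5}) {
                // One region on this server: split early at 2 * flushSize;
                // otherwise wait for the full max file size.
                long sizeToCheck = (regions == 1) ? initialSize : maxFileSize;
                System.out.println(regions + " region(s) -> split at " + sizeToCheck + " bytes");
            }
        }
    }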
hadoop_SuccessData_recordJobFailure
/**
 * Note a failure by setting the success flag to false,
 * then add the exception to the diagnostics.
 *
 * @param thrown throwable
 */
public void recordJobFailure(Throwable thrown) {
    setSuccess(false);
    String stacktrace = ExceptionUtils.getStackTrace(thrown);
    diagnostics.put("exception", thrown.toString());
    diagnostics.put("stacktrace", stacktrace);
}
3.68
framework_Table_setColumnFooter
/**
 * Sets the column footer caption. The column footer caption is the text
 * displayed beneath the column if footers have been set visible.
 *
 * @param propertyId
 *            The propertyId of the column
 *
 * @param footer
 *            The caption of the footer
 */
public void setColumnFooter(Object propertyId, String footer) {
    if (footer == null) {
        columnFooters.remove(propertyId);
    } else {
        columnFooters.put(propertyId, footer);
    }
    markAsDirty();
}
3.68