Dataset columns: name (string, length 12 to 178), code_snippet (string, length 8 to 36.5k), score (float64, range 3.26 to 3.68).
hudi_ConfigurationHotUpdateStrategyUtils_createConfigurationHotUpdateStrategy
/**
 * Creates a {@link ConfigurationHotUpdateStrategy} instance via reflection.
 *
 * <p>The class name of the {@link ConfigurationHotUpdateStrategy} is configured
 * through {@link HoodieStreamer.Config#configHotUpdateStrategyClass}; when it is
 * empty, an empty {@link Option} is returned.
 */
public static Option<ConfigurationHotUpdateStrategy> createConfigurationHotUpdateStrategy(
    String strategyClass, HoodieStreamer.Config cfg, TypedProperties properties) throws HoodieException {
  try {
    return StringUtils.isNullOrEmpty(strategyClass)
        ? Option.empty()
        : Option.of((ConfigurationHotUpdateStrategy) ReflectionUtils.loadClass(strategyClass, cfg, properties));
  } catch (Throwable e) {
    throw new HoodieException("Could not create configuration hot update strategy class " + strategyClass, e);
  }
}
3.68
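A minimal standalone sketch of the reflective-loading pattern above, using plain java.lang.reflect rather than Hudi's ReflectionUtils; the HotUpdateStrategy interface and the Properties-constructor shape are hypothetical stand-ins, not Hudi API.

import java.util.Optional;
import java.util.Properties;

interface HotUpdateStrategy { /* hypothetical stand-in for the real interface */ }

class StrategyLoader {
    static Optional<HotUpdateStrategy> load(String className, Properties props) {
        if (className == null || className.isEmpty()) {
            return Optional.empty();
        }
        try {
            // Look the class up by name and invoke its (assumed) Properties constructor.
            Class<?> clazz = Class.forName(className);
            return Optional.of((HotUpdateStrategy) clazz
                .getConstructor(Properties.class)
                .newInstance(props));
        } catch (ReflectiveOperationException e) {
            throw new IllegalStateException("Could not create strategy class " + className, e);
        }
    }
}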
hbase_RegionStateStore_deleteRegions
/** * Deletes the specified regions. */ public void deleteRegions(final List<RegionInfo> regions) throws IOException { deleteRegions(regions, EnvironmentEdgeManager.currentTime()); }
3.68
hadoop_RawErasureEncoder_release
/**
 * Should be called when releasing this coder. Good chance to release encoding
 * or decoding buffers.
 */
public void release() {
  // Nothing to do here.
}
3.68
hadoop_Cluster_getFileSystem
/**
 * Get the file system where job-specific files are stored.
 *
 * @return object of FileSystem
 * @throws IOException
 * @throws InterruptedException
 */
public synchronized FileSystem getFileSystem() throws IOException, InterruptedException {
  if (this.fs == null) {
    try {
      this.fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException, InterruptedException {
          final Path sysDir = new Path(client.getSystemDir());
          return sysDir.getFileSystem(getConf());
        }
      });
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
  }
  return fs;
}
3.68
hbase_ReversedKeyValueHeap_compareRows
/** * Compares the rows of two KeyValues. * @return less than 0 if left is smaller, 0 if equal, greater than 0 if left is bigger. */ public int compareRows(Cell left, Cell right) { return super.kvComparator.compareRows(left, right); }
3.68
hmily_HmilyTransactionHolder_getInstance
/** * Gets instance. * * @return the instance */ public static HmilyTransactionHolder getInstance() { return INSTANCE; }
3.68
framework_TextFileProperty_getType
/* * (non-Javadoc) * * @see com.vaadin.data.Property#getType() */ @Override public Class<String> getType() { return String.class; }
3.68
graphhopper_ShortestPathTree_setTimeLimit
/** * Time limit in milliseconds */ public void setTimeLimit(double limit) { exploreType = TIME; this.limit = limit; this.queueByZ = new PriorityQueue<>(1000, comparingLong(l -> l.time)); }
3.68
framework_EventRouter_addListener
/*
 * Registers a new listener with the specified named activation method to
 * listen to events generated by this component. Don't add a JavaDoc comment
 * here, we use the default documentation from the implemented interface.
 */
@Override
public Registration addListener(Class<?> eventType, SerializableEventListener listener, String methodName) {
  Objects.requireNonNull(listener, "Listener must not be null.");
  if (listenerList == null) {
    listenerList = new LinkedHashSet<>();
  }
  ListenerMethod listenerMethod = new ListenerMethod(eventType, listener, methodName);
  listenerList.add(listenerMethod);
  return () -> listenerList.remove(listenerMethod);
}
3.68
hbase_WALSplitter_splitLogFile
/**
 * Splits a WAL file. Used by the old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
 * and tests. Not used by the new procedure-based WAL splitter.
 *
 * @return false if it is interrupted by the progressable.
 */
public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem walFS,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory,
    RegionServerServices rsServices) throws IOException {
  Path rootDir = CommonFSUtils.getRootDir(conf);
  FileSystem rootFS = rootDir.getFileSystem(conf);
  WALSplitter splitter = new WALSplitter(factory, conf, walDir, walFS, rootDir, rootFS,
      idChecker, splitLogWorkerCoordination, rsServices);
  // splitWAL returns a data structure with whether split is finished and if the file is corrupt.
  // We don't need to propagate the corruption flag here because it is propagated by the
  // SplitLogWorkerCoordination.
  return splitter.splitWAL(logfile, reporter).isFinished();
}
3.68
flink_DecimalData_precision
/** * Returns the <i>precision</i> of this {@link DecimalData}. * * <p>The precision is the number of digits in the unscaled value. */ public int precision() { return precision; }
3.68
dubbo_ServiceInstance_isHealthy
/** * Whether the registered service instance is healthy or not. * * @return <code>true</code> if the current instance is healthy; when unhealthy, the client may ignore this instance. * The default value is <code>true</code>. */ default boolean isHealthy() { return true; }
3.68
flink_CsvReader_fieldDelimiter
/** * Configures the delimiter that separates the fields within a row. The comma character ({@code * ','}) is used by default. * * @param delimiter The delimiter that separates the fields in one row. * @return The CSV reader instance itself, to allow for fluent function chaining. */ public CsvReader fieldDelimiter(String delimiter) { this.fieldDelimiter = delimiter; return this; }
3.68
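The fluent chaining that the @return tag describes looks like this in the Flink DataSet API; the file path and field types in this usage sketch are illustrative, not from the source.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class CsvExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> rows = env
                .readCsvFile("file:///tmp/example.csv") // hypothetical path
                .fieldDelimiter(";")                    // override the default ','
                .types(String.class, Integer.class);
        rows.print();
    }
}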
framework_VTabsheet_getLastVisibleTab
/** * Returns the index of the last visible tab on the server. * * @return the index, or {@code -1} if not found */ private int getLastVisibleTab() { return getPreviousVisibleTab(getTabCount()); }
3.68
framework_AdjacentElementsWithTooltips_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 13998; }
3.68
flink_CatalogManager_setCurrentCatalog
/**
 * Sets the current catalog name that will be used when resolving table path.
 *
 * @param catalogName catalog name to set as current catalog
 * @throws CatalogNotExistException thrown if the catalog doesn't exist
 * @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
 */
public void setCurrentCatalog(@Nullable String catalogName) throws CatalogNotExistException {
    if (catalogName == null) {
        this.currentCatalogName = null;
        this.currentDatabaseName = null;
        return;
    }
    checkArgument(
            !StringUtils.isNullOrWhitespaceOnly(catalogName), "Catalog name cannot be empty.");
    Catalog potentialCurrentCatalog =
            getCatalog(catalogName)
                    .orElseThrow(
                            () -> new CatalogException(
                                    format("A catalog with name [%s] does not exist.", catalogName)));
    if (!catalogName.equals(currentCatalogName)) {
        currentCatalogName = catalogName;
        currentDatabaseName = potentialCurrentCatalog.getDefaultDatabase();
        LOG.info(
                "Set the current default catalog as [{}] and the current default database as [{}].",
                currentCatalogName,
                currentDatabaseName);
    }
}
3.68
dubbo_TriHttp2RemoteFlowController_writableWindow
/** * Returns the maximum writable window (minimum of the stream and connection windows). */ private int writableWindow() { return min(window, connectionWindowSize()); }
3.68
pulsar_PulsarAdminImpl_tenants
/** * @return the tenants management object */ public Tenants tenants() { return tenants; }
3.68
morf_OracleDialect_getColumnRepresentation
/**
 * @see org.alfasoftware.morf.jdbc.SqlDialect#getColumnRepresentation(org.alfasoftware.morf.metadata.DataType,
 *      int, int)
 */
@Override
protected String getColumnRepresentation(DataType dataType, int width, int scale) {
  switch (dataType) {
    case STRING:
      // The null suffix here is potentially controversial, since Oracle does
      // not distinguish between null and blank.
      // Obey the metadata for now, since this makes the process reversible.
      return String.format("NVARCHAR2(%d)", width);
    case DECIMAL:
      return String.format("DECIMAL(%d,%d)", width, scale);
    case DATE:
      return "DATE";
    case BOOLEAN:
      return "DECIMAL(1,0)";
    case INTEGER:
      return "INTEGER";
    case BIG_INTEGER:
      return "NUMBER(19)";
    case BLOB:
      return "BLOB";
    case CLOB:
      return "NCLOB";
    default:
      throw new UnsupportedOperationException("Cannot map column with type [" + dataType + "]");
  }
}
3.68
hadoop_AbstractClientRequestInterceptor_init
/** * Initializes the {@link ClientRequestInterceptor}. */ @Override public void init(String userName) { this.user = RouterServerUtil.setupUser(userName); if (this.nextInterceptor != null) { this.nextInterceptor.init(userName); } }
3.68
framework_VComboBox_navigateItemAfterPageChange
/*
 * This method navigates to the proper item in the combobox page. It
 * should be executed after the setSuggestions() method, which is called from
 * VComboBox.showSuggestions(); showSuggestions() builds the page content.
 * Since setSuggestions() is called as deferred, navigateItemAfterPageChange
 * should also be called as deferred. #11333
 */
private void navigateItemAfterPageChange() {
  if (navigationCallback != null) {
    // navigationCallback is not reset here but after any server
    // request in case you are in between two requests both changing
    // the page back and forth
    // we're paging w/ arrows
    navigationCallback.run();
    navigationCallback = null;
  }
}
3.68
hbase_MetricsMaster_incrementReadRequests
/** * @param inc How much to add to read requests. */ public void incrementReadRequests(final long inc) { masterSource.incReadRequests(inc); }
3.68
framework_VMenuBar_buildItemHTML
/**
 * Build the HTML content for a menu item.
 * <p>
 * For internal use only. May be removed or replaced in the future.
 *
 * @param separator
 *            whether the menu item is a separator
 * @param subMenu
 *            whether the menu item contains a submenu
 * @param iconUrl
 *            the menu item icon URL or {@code null}
 * @param text
 *            the menu item text. May not be {@code null}
 */
public String buildItemHTML(boolean separator, boolean subMenu, String iconUrl, String text) {
    // Construct html from the text and the optional icon
    StringBuilder itemHTML = new StringBuilder();
    if (separator) {
        itemHTML.append("<span>---</span>");
    } else {
        // Add submenu indicator
        if (subMenu) {
            String bgStyle = "";
            itemHTML.append("<span class=\"" + getStylePrimaryName()
                    + "-submenu-indicator\"" + bgStyle
                    + " aria-hidden=\"true\">&#x25BA;</span>");
        }
        itemHTML.append("<span class=\"" + getStylePrimaryName() + "-menuitem-caption\">");
        Icon icon = client.getIcon(iconUrl);
        if (icon != null) {
            itemHTML.append(icon.getElement().getString());
        }
        String itemText = text;
        if (!htmlContentAllowed) {
            itemText = WidgetUtil.escapeHTML(itemText);
        }
        itemHTML.append(itemText);
        itemHTML.append("</span>");
    }
    return itemHTML.toString();
}
3.68
flink_AvailabilityProvider_resetAvailable
/** Resets the constant completed {@link #AVAILABLE} as the current state. */ public void resetAvailable() { availableFuture = AVAILABLE; }
3.68
framework_AbstractProperty_fireReadOnlyStatusChange
/** * Sends a read only status change event to all registered listeners. */ protected void fireReadOnlyStatusChange() { if (readOnlyStatusChangeListeners != null) { final Property.ReadOnlyStatusChangeEvent event = new ReadOnlyStatusChangeEvent( this); for (Object l : readOnlyStatusChangeListeners.toArray()) { ((Property.ReadOnlyStatusChangeListener) l) .readOnlyStatusChange(event); } } }
3.68
hadoop_AbfsRestOperation_completeExecute
/**
 * Executes the REST operation with retry, by issuing one or more
 * HTTP operations.
 *
 * @param tracingContext TracingContext instance to track correlation IDs
 */
void completeExecute(TracingContext tracingContext) throws AzureBlobFileSystemException {
  // see if we have latency reports from the previous requests
  String latencyHeader = getClientLatency();
  if (latencyHeader != null && !latencyHeader.isEmpty()) {
    AbfsHttpHeader httpHeader = new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_ABFS_CLIENT_LATENCY, latencyHeader);
    requestHeaders.add(httpHeader);
  }

  retryCount = 0;
  LOG.debug("First execution of REST operation - {}", operationType);
  while (!executeHttpOperation(retryCount, tracingContext)) {
    try {
      ++retryCount;
      tracingContext.setRetryCount(retryCount);
      LOG.debug("Retrying REST operation {}. RetryCount = {}", operationType, retryCount);
      Thread.sleep(client.getRetryPolicy().getRetryInterval(retryCount));
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
  }

  int status = result.getStatusCode();
  /*
   * If even after exhausting all retries the http status code has an invalid
   * value, it qualifies for InvalidAbfsRestOperationException. All http status
   * codes below the 1xx range are considered invalid.
   */
  if (status < HTTP_CONTINUE) {
    throw new InvalidAbfsRestOperationException(null, retryCount);
  }

  if (status >= HttpURLConnection.HTTP_BAD_REQUEST) {
    throw new AbfsRestOperationException(result.getStatusCode(), result.getStorageErrorCode(),
        result.getStorageErrorMessage(), null, result);
  }
  LOG.trace("{} REST operation complete", operationType);
}
3.68
morf_AbstractSqlDialectTest_testUpdateUsingAliasedDestinationTable
/**
 * Tests an update where the destination table is aliased.
 */
@Test
public void testUpdateUsingAliasedDestinationTable() {
  SelectStatement selectStmt = new SelectStatement(new FieldReference("settlementFrequency"))
      .from(new TableReference("FloatingRateDetail").as("B"))
      .where(eq(
          new FieldReference(new TableReference("A"), "floatingRateDetailId"),
          new FieldReference(new TableReference("B"), "id")));

  UpdateStatement updateStmt = new UpdateStatement(new TableReference("FloatingRateRate").as("A"))
      .set(new FieldFromSelect(selectStmt).as("settlementFrequency"));

  assertEquals("Update from a select with aliased destination",
      expectedUpdateUsingAliasedDestinationTable(),
      testDialect.convertStatementToSQL(updateStmt));
}
3.68
AreaShop_GeneralRegion_saveRegionBlocks
/**
 * Save all blocks in a region for restoring later.
 *
 * @param fileName The name of the file to save to (extension and folder will be added)
 * @return true if the region has been saved properly, otherwise false
 */
public boolean saveRegionBlocks(String fileName) {
  // Check if the region is correct
  ProtectedRegion region = getRegion();
  if(region == null) {
    AreaShop.debug("Region '" + getName() + "' does not exist in WorldGuard, save failed");
    return false;
  }
  // The path to save the schematic
  File saveFile = new File(plugin.getFileManager().getSchematicFolder() + File.separator + fileName);
  // Create parent directories
  File parent = saveFile.getParentFile();
  if(parent != null && !parent.exists()) {
    if(!parent.mkdirs()) {
      AreaShop.warn("Did not save region " + getName() + ", schematic directory could not be created: " + saveFile.getAbsolutePath());
      return false;
    }
  }
  boolean result = plugin.getWorldEditHandler().saveRegionBlocks(saveFile, this);
  if(result) {
    AreaShop.debug("Saved schematic for region " + getName());
  }
  // Propagate the save result so the @return contract holds
  return result;
}
3.68
pulsar_WorkerApiV2Resource_clientAppId
/** * @deprecated use {@link #authParams()} instead */ @Deprecated public String clientAppId() { return httpRequest != null ? (String) httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName) : null; }
3.68
hbase_StorageClusterStatusModel_setRequests
/** * @param requests the total number of requests per second handled by the cluster */ public void setRequests(long requests) { this.requests = requests; }
3.68
framework_UIDL_getStringAttribute
/** * Gets the named attribute as a String. * * @param name * the name of the attribute to get * @return the attribute value */ public String getStringAttribute(String name) { return attr().getString(name); }
3.68
hbase_RegionCoprocessorHost_preGet
// RegionObserver support

/**
 * Supports Coprocessor 'bypass'.
 *
 * @param get the Get request
 * @param results What to return if return is true/'bypass'.
 * @return true if default processing should be bypassed.
 * @exception IOException Exception
 */
public boolean preGet(final Get get, final List<Cell> results) throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return false;
  }
  boolean bypassable = true;
  return execOperation(new RegionObserverOperationWithoutResult(bypassable) {
    @Override
    public void call(RegionObserver observer) throws IOException {
      observer.preGetOp(this, get, results);
    }
  });
}
3.68
hudi_BufferedRandomAccessFile_length
/** * Returns the length of the file, depending on whether buffer has more data (to be flushed). * @return - length of the file (including data yet to be flushed to the file). * @throws IOException */ @Override public long length() throws IOException { return Math.max(this.currentPosition, super.length()); }
3.68
hbase_MasterObserver_postGetRSGroupInfo
/** * Called after getting region server group info of the passed groupName. * @param ctx the environment to interact with the framework and master * @param groupName name of the group to get RSGroupInfo for */ default void postGetRSGroupInfo(final ObserverContext<MasterCoprocessorEnvironment> ctx, final String groupName) throws IOException { }
3.68
hadoop_RouterFedBalance_submit
/**
 * Start a ProcedureScheduler and submit the job.
 *
 * @param command the command options.
 * @param inputSrc the source input. This specifies the source path.
 * @param inputDst the dst input. This specifies the dst path.
 */
private int submit(CommandLine command, String inputSrc, String inputDst) throws IOException {
  Builder builder = new Builder(inputSrc, inputDst);
  // parse options.
  builder.setForceCloseOpen(command.hasOption(FORCE_CLOSE_OPEN.getOpt()));
  if (command.hasOption(MAP.getOpt())) {
    builder.setMap(Integer.parseInt(command.getOptionValue(MAP.getOpt())));
  }
  if (command.hasOption(BANDWIDTH.getOpt())) {
    builder.setBandWidth(
        Integer.parseInt(command.getOptionValue(BANDWIDTH.getOpt())));
  }
  if (command.hasOption(DELAY_DURATION.getOpt())) {
    builder.setDelayDuration(
        Long.parseLong(command.getOptionValue(DELAY_DURATION.getOpt())));
  }
  if (command.hasOption(DIFF_THRESHOLD.getOpt())) {
    builder.setDiffThreshold(Integer.parseInt(
        command.getOptionValue(DIFF_THRESHOLD.getOpt())));
  }
  if (command.hasOption(TRASH.getOpt())) {
    String val = command.getOptionValue(TRASH.getOpt());
    if (val.equalsIgnoreCase("skip")) {
      builder.setTrashOpt(TrashOption.SKIP);
    } else if (val.equalsIgnoreCase("trash")) {
      builder.setTrashOpt(TrashOption.TRASH);
    } else if (val.equalsIgnoreCase("delete")) {
      builder.setTrashOpt(TrashOption.DELETE);
    } else {
      printUsage();
      return -1;
    }
  }
  // Submit the job.
  BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(getConf());
  scheduler.init(false);
  try {
    BalanceJob balanceJob = builder.build();
    // Submit and wait until the job is done.
    scheduler.submit(balanceJob);
    scheduler.waitUntilDone(balanceJob);
  } catch (IOException e) {
    LOG.error("Submit balance job failed.", e);
    return -1;
  } finally {
    scheduler.shutDown();
  }
  return 0;
}
3.68
hadoop_PlacementConstraints_minCardinality
/** * Similar to {@link #minCardinality(String, int, String...)}, but let you * attach a namespace to the allocation tags. * * @param scope the scope of the constraint * @param namespace the namespace of these tags * @param minCardinality determines the minimum number of allocations within * the scope * @param allocationTags the constraint targets allocations with these tags * @return the resulting placement constraint */ public static AbstractConstraint minCardinality(String scope, String namespace, int minCardinality, String... allocationTags) { return cardinality(scope, namespace, minCardinality, Integer.MAX_VALUE, allocationTags); }
3.68
morf_OracleMetaDataProvider_tableNames
/** * @see org.alfasoftware.morf.metadata.Schema#tableNames() */ @Override public Collection<String> tableNames() { return tableMap().keySet(); }
3.68
flink_SerializedCompositeKeyBuilder_setKeyAndKeyGroup
/** * Sets the key and key-group as prefix. This will serialize them into the buffer, and they will * be used to create composite keys with provided namespaces. * * @param key the key. * @param keyGroupId the key-group id for the key. */ public void setKeyAndKeyGroup(@Nonnull K key, @Nonnegative int keyGroupId) { try { serializeKeyGroupAndKey(key, keyGroupId); } catch (IOException shouldNeverHappen) { throw new FlinkRuntimeException(shouldNeverHappen); } }
3.68
hadoop_OBSBlockOutputStream_flushOrSync
/**
 * Flush the local file or multipart upload to OBS. Note: hflush/hsync are
 * only supported for POSIX (fs) buckets.
 *
 * @throws IOException io exception
 */
private synchronized void flushOrSync() throws IOException {
  checkOpen();
  if (hasException.get()) {
    String flushWarning = String.format(
        "flushOrSync has error. bs : pre write obs[%s] has error.", key);
    LOG.warn(flushWarning);
    throw new IOException(flushWarning);
  }
  if (fs.isFsBucket()) {
    // upload
    flushCurrentBlock();
    // clear
    clearHFlushOrSync();
  } else {
    LOG.warn("not posix bucket, not support hflush or hsync.");
    flush();
  }
}
3.68
hudi_FormatUtils_setRowKind
/** * Sets up the row kind to the row data {@code rowData} from the resolved operation. */ public static void setRowKind(RowData rowData, IndexedRecord record, int index) { if (index == -1) { return; } rowData.setRowKind(getRowKind(record, index)); }
3.68
framework_DragSourceExtension_getDataTransferData
/**
 * Returns the map of data stored in this drag source element. The returned
 * map preserves the order of storage and is unmodifiable.
 *
 * @return Unmodifiable copy of the map of data in the order the data was
 *         stored.
 */
public Map<String, String> getDataTransferData() {
    Map<String, String> data = getState(false).data;

    // Create a map of data that preserves the order of types
    LinkedHashMap<String, String> orderedData = new LinkedHashMap<>(data.size());
    getState(false).types
            .forEach(type -> orderedData.put(type, data.get(type)));

    return Collections.unmodifiableMap(orderedData);
}
3.68
morf_NamedParameterPreparedStatement_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return String.format("[%s]:%s", query, indexMap.toString()); }
3.68
open-banking-gateway_Xs2aFlowNameSelector_getNameForExecution
/** * Sub-process name for current context (PSU/FinTech input) execution (real calls to ASPSP API). */ public String getNameForExecution(Xs2aContext ctx) { return actionName(ctx); }
3.68
dubbo_Utf8Utils_isTwoBytes
/** * Returns whether this is the lead byte of a two-byte codepoint, with the form '110XXXXX'. */ private static boolean isTwoBytes(byte b) { return b < (byte) 0xE0; }
3.68
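A standalone sketch (plain JDK, not dubbo's internals) of why the single comparison suffices: the caller has already ruled out one-byte (ASCII) and continuation bytes, so for a known lead byte the signed test b < 0xE0 pins down the 110XXXXX range. The explicit lower-bound checks below are added for a self-contained demo.

import java.nio.charset.StandardCharsets;

public class Utf8LeadBytes {
    static boolean isOneByte(byte b)    { return b >= 0; }                               // 0xxxxxxx
    static boolean isTwoBytes(byte b)   { return b >= (byte) 0xC0 && b < (byte) 0xE0; }  // 110xxxxx
    static boolean isThreeBytes(byte b) { return b >= (byte) 0xE0 && b < (byte) 0xF0; }  // 1110xxxx

    public static void main(String[] args) {
        byte[] euro = "\u20AC".getBytes(StandardCharsets.UTF_8); // 0xE2 0x82 0xAC
        System.out.println(isThreeBytes(euro[0])); // true
        System.out.println(isTwoBytes((byte) 0xC3)); // true, e.g. lead byte of 'é'
    }
}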
hadoop_S3APrefetchingInputStream_setReadahead
/** * Sets the number of bytes to read ahead each time. * * @param readahead the number of bytes to read ahead each time. */ @Override public synchronized void setReadahead(Long readahead) { if (!isClosed()) { inputStream.setReadahead(readahead); } }
3.68
hadoop_ByteBufferEncodingState_checkBuffers
/** * Check and ensure the buffers are of the desired length and type, direct * buffers or not. * @param buffers the buffers to check */ void checkBuffers(ByteBuffer[] buffers) { for (ByteBuffer buffer : buffers) { if (buffer == null) { throw new HadoopIllegalArgumentException( "Invalid buffer found, not allowing null"); } if (buffer.remaining() != encodeLength) { throw new HadoopIllegalArgumentException( "Invalid buffer, not of length " + encodeLength); } if (buffer.isDirect() != usingDirectBuffer) { throw new HadoopIllegalArgumentException( "Invalid buffer, isDirect should be " + usingDirectBuffer); } } }
3.68
hudi_SourceFormatAdapter_isFieldNameSanitizingEnabled
/** * Config that automatically sanitizes the field names as per avro naming rules. * @return enabled status. */ private boolean isFieldNameSanitizingEnabled() { return shouldSanitize; }
3.68
hbase_OrderedBytes_decodeInt8
/** * Decode an {@code int8} value. * @see #encodeInt8(PositionedByteRange, byte, Order) */ public static byte decodeInt8(PositionedByteRange src) { final byte header = src.get(); assert header == FIXED_INT8 || header == DESCENDING.apply(FIXED_INT8); Order ord = header == FIXED_INT8 ? ASCENDING : DESCENDING; return (byte) ((ord.apply(src.get()) ^ 0x80) & 0xff); }
3.68
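The header byte and Order handling above are HBase specifics, but the core of decodeInt8 is the sign-bit flip: XOR-ing the sign bit maps signed comparison order onto unsigned (lexicographic) byte order. A standalone sketch of just that trick:

public class SignFlip {
    static int encode(byte v) { return (v ^ 0x80) & 0xFF; }          // unsigned 0..255
    static byte decode(int e) { return (byte) ((e ^ 0x80) & 0xFF); } // back to signed

    public static void main(String[] args) {
        // -2 < 3 as signed bytes, and their encodings compare the same way unsigned.
        System.out.println(encode((byte) -2) < encode((byte) 3)); // true: 126 < 131
        System.out.println(decode(encode((byte) -2)));            // -2
    }
}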
flink_BinaryStringDataUtil_toInt
/**
 * Parses this BinaryStringData to Int.
 *
 * <p>Note that, in this method we accumulate the result in negative format, and convert it to
 * positive format at the end, if this string does not start with '-'. This is because the
 * absolute value of the min value is bigger than the max value, e.g. Integer.MAX_VALUE is
 * '2147483647' and Integer.MIN_VALUE is '-2147483648'.
 *
 * <p>This code is mostly copied from LazyInt.parseInt in Hive.
 *
 * <p>Note that, this method is almost the same as `toLong`, but we leave it duplicated for
 * performance reasons, like Hive does.
 */
public static int toInt(BinaryStringData str) throws NumberFormatException {
    int sizeInBytes = str.getSizeInBytes();
    byte[] tmpBytes = getTmpBytes(str, sizeInBytes);
    if (sizeInBytes == 0) {
        throw numberFormatExceptionFor(str, "Input is empty.");
    }
    int i = 0;

    byte b = tmpBytes[i];
    final boolean negative = b == '-';
    if (negative || b == '+') {
        i++;
        if (sizeInBytes == 1) {
            throw numberFormatExceptionFor(str, "Input has only positive or negative symbol.");
        }
    }

    int result = 0;
    final byte separator = '.';
    final int radix = 10;
    final long stopValue = Integer.MIN_VALUE / radix;
    while (i < sizeInBytes) {
        b = tmpBytes[i];
        i++;
        if (b == separator) {
            // We allow decimals and will return a truncated integral in that case.
            // Therefore we won't throw an exception here (checking the fractional
            // part happens below.)
            break;
        }

        int digit;
        if (b >= '0' && b <= '9') {
            digit = b - '0';
        } else {
            throw numberFormatExceptionFor(str, "Invalid character found.");
        }

        // We are going to process the new digit and accumulate the result. However, before
        // doing this, if the result is already smaller than the
        // stopValue(Integer.MIN_VALUE / radix), then result * 10 will definitely be smaller
        // than minValue, and we can stop.
        if (result < stopValue) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }

        result = result * radix - digit;
        // Since the previous result is less than or equal to
        // stopValue(Integer.MIN_VALUE / radix), we can just use `result > 0` to check overflow.
        // If result overflows, we should stop.
        if (result > 0) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }
    }

    // This is the case when we've encountered a decimal separator. The fractional
    // part will not change the number, but we will verify that the fractional part
    // is well formed.
    while (i < sizeInBytes) {
        byte currentByte = tmpBytes[i];
        if (currentByte < '0' || currentByte > '9') {
            throw numberFormatExceptionFor(str, "Invalid character found.");
        }
        i++;
    }

    if (!negative) {
        result = -result;
        if (result < 0) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }
    }

    return result;
}
3.68
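A stripped-down sketch of the negative-accumulation trick on a plain String: accumulate in the negative range, which can hold Integer.MIN_VALUE, and flip the sign at the end. Much of the real method's input validation (empty input, lone sign, decimal separators) is omitted here.

public class NegativeAccumulate {
    static int parse(String s) {
        boolean negative = s.charAt(0) == '-';
        int i = negative ? 1 : 0;
        int result = 0;
        final int stopValue = Integer.MIN_VALUE / 10;
        for (; i < s.length(); i++) {
            int digit = s.charAt(i) - '0';
            if (digit < 0 || digit > 9) throw new NumberFormatException("Invalid character: " + s);
            if (result < stopValue) throw new NumberFormatException("Overflow: " + s);
            result = result * 10 - digit;     // stay in the negative range
            if (result > 0) throw new NumberFormatException("Overflow: " + s);
        }
        if (!negative) {
            result = -result;                 // flip only for positive inputs
            if (result < 0) throw new NumberFormatException("Overflow: " + s);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(parse("-2147483648")); // Integer.MIN_VALUE, no overflow
        System.out.println(parse("2147483647"));  // Integer.MAX_VALUE
    }
}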
framework_HierarchyMapper_registerChildren
/** * Register parent and children items into inner structures. May be * overridden in subclasses. * * @param parent * the parent item * @param childList * list of the parent's children to be registered. */ protected void registerChildren(T parent, List<T> childList) { childMap.put(parent, new HashSet<>(childList)); childList.forEach( x -> parentIdMap.put(getDataProvider().getId(x), parent)); }
3.68
framework_GridMultiSelect_select
/** * Selects the given item. If another item was already selected, that item * is deselected. * * @param item * the item to select */ public void select(T item) { model.select(item); }
3.68
hadoop_Trilean_toBoolean
/** * Converts the Trilean enum to boolean. * * @return the corresponding boolean. * @throws TrileanConversionException when tried to convert Trilean.UNKNOWN. */ public boolean toBoolean() throws TrileanConversionException { if (this == Trilean.UNKNOWN) { throw new TrileanConversionException(); } return Boolean.valueOf(this.name()); }
3.68
hadoop_CopyOutputFormat_setCommitDirectory
/** * Setter for the final directory for DistCp (where files copied will be * moved, atomically.) * @param job The Job on whose configuration the working-directory is to be set. * @param commitDirectory The path to use for final commit. */ public static void setCommitDirectory(Job job, Path commitDirectory) { job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, commitDirectory.toString()); }
3.68
open-banking-gateway_QuirkUtil_pushBicToXs2aAdapterHeaders
// TODO: Needed because of https://github.com/adorsys/xs2a/issues/73 and because we can't set "X-OAUTH-PREFERRED" header directly
public static RequestHeaders pushBicToXs2aAdapterHeaders(Xs2aContext context, RequestHeaders toEnhance) {
    // TODO: Warning, for Adorsys Sandbox for Oauth2-Integrated the adapter should be configured to send proper header
    // due to https://github.com/adorsys/xs2a/issues/73
    String bankCode = context.getRequestScoped().aspspProfile().getBankCode();
    if (!Strings.isNullOrEmpty(bankCode)) {
        Map<String, String> headers = toEnhance.toMap();
        headers.put(X_GTW_BANK_CODE, bankCode);
        return RequestHeaders.fromMap(headers);
    }
    return toEnhance;
}
3.68
hbase_MemStoreLABImpl_copyBBECToChunkCell
/** * Clone the passed cell by copying its data into the passed buf and create a cell with a chunkid * out of it * @see #copyToChunkCell(Cell, ByteBuffer, int, int) */ private static Cell copyBBECToChunkCell(ByteBufferExtendedCell cell, ByteBuffer buf, int offset, int len) { int tagsLen = cell.getTagsLength(); cell.write(buf, offset); return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId()); }
3.68
flink_MemorySize_getGibiBytes
/** Gets the memory size in Gibibytes (= 1024 Mebibytes). */ public long getGibiBytes() { return bytes >> 30; }
3.68
hadoop_AppCollectorData_happensBefore
/** * Returns if a collector data item happens before another one. Null data * items happens before any other non-null items. Non-null data items A * happens before another non-null item B when A's rmIdentifier is less than * B's rmIdentifier. Or A's version is less than B's if they have the same * rmIdentifier. * * @param dataA first collector data item. * @param dataB second collector data item. * @return true if dataA happens before dataB. */ public static boolean happensBefore(AppCollectorData dataA, AppCollectorData dataB) { if (dataA == null && dataB == null) { return false; } else if (dataA == null || dataB == null) { return dataA == null; } return (dataA.getRMIdentifier() < dataB.getRMIdentifier()) || ((dataA.getRMIdentifier() == dataB.getRMIdentifier()) && (dataA.getVersion() < dataB.getVersion())); }
3.68
hbase_ZKUtil_deleteChildrenRecursively
/** * Delete all the children of the specified node but not the node itself. Sets no watches. Throws * all exceptions besides dealing with deletion of children. * @throws KeeperException if a ZooKeeper operation fails */ public static void deleteChildrenRecursively(ZKWatcher zkw, String node) throws KeeperException { deleteChildrenRecursivelyMultiOrSequential(zkw, true, node); }
3.68
framework_FocusableGrid_addKeyPressHandler
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.HasKeyPressHandlers#addKeyPressHandler * (com.google.gwt.event.dom.client.KeyPressHandler) */ @Override public HandlerRegistration addKeyPressHandler(KeyPressHandler handler) { return addDomHandler(handler, KeyPressEvent.getType()); }
3.68
flink_MemorySegment_getLongBigEndian
/** * Reads a long integer value (64bit, 8 bytes) from the given position, in big endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #getLong(int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #getLong(int)} is the * preferable choice. * * @param index The position from which the value will be read. * @return The long value at the given position. * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the * segment size minus 8. */ public long getLongBigEndian(int index) { if (LITTLE_ENDIAN) { return Long.reverseBytes(getLong(index)); } else { return getLong(index); } }
3.68
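A standalone sketch of the same byte-order dance using only the JDK; the nativeValue parameter stands in for the raw read that MemorySegment performs internally.

import java.nio.ByteOrder;

public class BigEndianRead {
    static long getLongBigEndian(long nativeValue) {
        if (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN) {
            return Long.reverseBytes(nativeValue); // swap to big-endian interpretation
        }
        return nativeValue;
    }

    public static void main(String[] args) {
        System.out.println(Long.toHexString(Long.reverseBytes(0x0102030405060708L)));
        // prints 807060504030201
    }
}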
flink_UnsortedGrouping_max
/** * Syntactic sugar for aggregate (MAX, field). * * @param field The index of the Tuple field on which the aggregation function is applied. * @return An AggregateOperator that represents the max'ed DataSet. * @see org.apache.flink.api.java.operators.AggregateOperator */ public AggregateOperator<T> max(int field) { return this.aggregate(Aggregations.MAX, field, Utils.getCallLocationName()); }
3.68
hbase_ActiveMasterManager_hasActiveMaster
/** Returns True if cluster has an active master. */ boolean hasActiveMaster() { try { if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().masterAddressZNode) >= 0) { return true; } } catch (KeeperException ke) { LOG.info("Received an unexpected KeeperException when checking " + "isActiveMaster : " + ke); } return false; }
3.68
hbase_ImmutableBytesWritable_compareTo
/** * Compares the bytes in this object to the specified byte array * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is * smaller than right. */ public int compareTo(final byte[] that) { return WritableComparator.compareBytes(this.bytes, this.offset, this.length, that, 0, that.length); }
3.68
flink_MemoryManager_getMemorySize
/** * Returns the total size of memory handled by this memory manager. * * @return The total size of memory. */ public long getMemorySize() { return memoryBudget.getTotalMemorySize(); }
3.68
hadoop_FsAction_implies
/** * Return true if this action implies that action. * @param that FsAction that. * @return true if this action implies that action, false otherwise. */ public boolean implies(FsAction that) { if (that != null) { return (ordinal() & that.ordinal()) == that.ordinal(); } return false; }
3.68
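The ordinal check works because FsAction's enum ordinals encode rwx permission bits. A standalone sketch of the subset test with plain ints; the bit values mirror, but are not, the Hadoop enum.

public class BitImplies {
    static final int READ = 4, WRITE = 2, EXECUTE = 1;

    static boolean implies(int a, int b) {
        return (a & b) == b; // every bit set in b is also set in a
    }

    public static void main(String[] args) {
        int readWrite = READ | WRITE; // 6, like FsAction.READ_WRITE's ordinal
        System.out.println(implies(readWrite, READ));    // true
        System.out.println(implies(readWrite, EXECUTE)); // false
    }
}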
hbase_Bytes_putShort
/** * Put a short value out to the specified byte array position. * @param bytes the byte array * @param offset position in the array * @param val short to write out * @return incremented offset * @throws IllegalArgumentException if the byte array given doesn't have enough room at the offset * specified. */ public static int putShort(byte[] bytes, int offset, short val) { if (bytes.length - offset < SIZEOF_SHORT) { throw new IllegalArgumentException("Not enough room to put a short at" + " offset " + offset + " in a " + bytes.length + " byte array"); } return ConverterHolder.BEST_CONVERTER.putShort(bytes, offset, val); }
3.68
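A plain-array sketch of the write and the incremented-offset convention, without HBase's ConverterHolder indirection; the big-endian byte layout is an assumption based on the Bytes class's documented format.

public class PutShort {
    static int putShort(byte[] bytes, int offset, short val) {
        if (bytes.length - offset < 2) {
            throw new IllegalArgumentException("Not enough room at offset " + offset);
        }
        bytes[offset] = (byte) (val >> 8); // high byte first (big-endian)
        bytes[offset + 1] = (byte) val;    // low byte
        return offset + 2;                 // caller can chain consecutive writes
    }

    public static void main(String[] args) {
        byte[] buf = new byte[4];
        int off = putShort(buf, 0, (short) 0x1234);
        off = putShort(buf, off, (short) 0x5678); // off is now 4
        System.out.printf("%02x %02x %02x %02x%n", buf[0], buf[1], buf[2], buf[3]);
        // 12 34 56 78
    }
}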
MagicPlugin_PreLoadEvent_registerTeamProvider
/** * Register a TeamProvider, to be able to make decisions about who players and mobs can target. * * @param provider The provider to add. */ public void registerTeamProvider(TeamProvider provider) { teamProviders.add(provider); }
3.68
hudi_CsvDFSSource_fromFiles
/**
 * Reads the CSV files and parses the lines into a {@link Dataset} of {@link Row}.
 *
 * @param pathStr The list of file paths, separated by ','.
 * @return {@link Dataset} of {@link Row} containing the records.
 */
private Option<Dataset<Row>> fromFiles(Option<String> pathStr) {
  if (pathStr.isPresent()) {
    DataFrameReader dataFrameReader = sparkSession.read().format("csv");
    CSV_CONFIG_KEYS.forEach(optionKey -> {
      String configPropName = CSV_SRC_CONFIG_PREFIX + optionKey;
      String oldConfigPropName = OLD_CSV_SRC_CONFIG_PREFIX + optionKey;
      String value = props.getString(configPropName, props.getString(oldConfigPropName, null));
      // Pass down the Hudi CSV configs to Spark DataFrameReader
      if (value != null) {
        dataFrameReader.option(optionKey, value);
      }
    });
    if (sourceSchema != null) {
      // Source schema is specified, pass it to the reader
      dataFrameReader.schema(sourceSchema);
    }
    dataFrameReader.option("inferSchema", Boolean.toString(sourceSchema == null));
    return Option.of(dataFrameReader.load(pathStr.get().split(",")));
  } else {
    return Option.empty();
  }
}
3.68
flink_BlockingBackChannel_getWriteEnd
/** Called by iteration tail to save the output of the current superstep. */ public DataOutputView getWriteEnd() { return buffer; }
3.68
framework_FlyweightCell_assertSetup
/** * Asserts that the flyweight cell has properly been set up before trying to * access any of its data. */ private void assertSetup() { assert currentIterator != null : "FlyweightCell was not properly " + "initialized. This is either a bug in Grid/Escalator " + "or a Cell reference has been stored and reused " + "inappropriately."; }
3.68
hadoop_ReferenceCountMap_clear
/** * Clear the contents */ @VisibleForTesting public void clear() { referenceMap.clear(); }
3.68
hadoop_PublishedConfiguration_isEmpty
/** * Is the configuration empty. This means either that it has not * been given any values, or it is a stripped-down copy sent over the * wire. * @return true if it is empty */ public boolean isEmpty() { return entries.isEmpty(); }
3.68
hadoop_IOStatisticsBinding_trackDurationOfInvocation
/** * Given an IOException raising callable/lambda expression, * execute it and update the relevant statistic. * @param factory factory of duration trackers * @param statistic statistic key * @param input input callable. * @throws IOException IO failure. */ public static void trackDurationOfInvocation( DurationTrackerFactory factory, String statistic, InvocationRaisingIOE input) throws IOException { measureDurationOfInvocation(factory, statistic, input); }
3.68
flink_AbstractParameterTool_getLong
/** * Returns the Long value for the given key. If the key does not exist it will return the * default value given. The method fails if the value is not a Long. */ public long getLong(String key, long defaultValue) { addToDefaults(key, Long.toString(defaultValue)); String value = get(key); if (value == null) { return defaultValue; } return Long.parseLong(value); }
3.68
hbase_VisibilityClient_isCellVisibilityEnabled
/** * Return true if cell visibility features are supported and enabled * @param connection The connection to use * @return true if cell visibility features are supported and enabled, false otherwise */ public static boolean isCellVisibilityEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() .contains(SecurityCapability.CELL_VISIBILITY); }
3.68
hbase_ClientMetaTableAccessor_getRegionLocationWithEncodedName
/** Returns the HRegionLocation from meta for the given encoded region name */
public static CompletableFuture<Optional<HRegionLocation>>
  getRegionLocationWithEncodedName(AsyncTable<?> metaTable, byte[] encodedRegionName) {
  CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
  addListener(
    metaTable
      .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)),
    (results, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
        return;
      }
      String encodedRegionNameStr = Bytes.toString(encodedRegionName);
      results.stream().filter(result -> !result.isEmpty())
        .filter(result -> CatalogFamilyFormat.getRegionInfo(result) != null).forEach(result -> {
          getRegionLocations(result).ifPresent(locations -> {
            for (HRegionLocation location : locations.getRegionLocations()) {
              if (
                location != null
                  && encodedRegionNameStr.equals(location.getRegion().getEncodedName())
              ) {
                future.complete(Optional.of(location));
                return;
              }
            }
          });
        });
      future.complete(Optional.empty());
    });
  return future;
}
3.68
hadoop_AbstractS3ACommitter_loadAndCommit
/**
 * Load a pendingset file and commit all of its contents.
 * Invoked within a parallel run; the commitContext thread
 * pool is already busy/possibly full, so do not
 * execute work through the same submitter.
 *
 * @param commitContext context to commit through
 * @param activeCommit commit state
 * @param status file to load
 * @throws IOException failure
 */
private void loadAndCommit(
    final CommitContext commitContext,
    final ActiveCommit activeCommit,
    final FileStatus status) throws IOException {

  final Path path = status.getPath();
  commitContext.switchToIOStatisticsContext();
  try (DurationInfo ignored = new DurationInfo(LOG,
      "Loading and committing files in pendingset %s", path)) {
    PendingSet pendingSet = PersistentCommitData.load(
        activeCommit.getSourceFS(),
        status,
        commitContext.getPendingSetSerializer());
    String jobId = pendingSet.getJobId();
    if (!StringUtils.isEmpty(jobId) && !getUUID().equals(jobId)) {
      throw new PathCommitException(path,
          String.format("Mismatch in Job ID (%s) and commit job ID (%s)",
              getUUID(), jobId));
    }
    TaskPool.foreach(pendingSet.getCommits())
        .stopOnFailure()
        .suppressExceptions(false)
        .executeWith(commitContext.getInnerSubmitter())
        .onFailure((commit, exception) -> commitContext.abortSingleCommit(commit))
        .abortWith(commitContext::abortSingleCommit)
        .revertWith(commitContext::revertCommit)
        .run(commit -> {
          commitContext.commitOrFail(commit);
          activeCommit.uploadCommitted(
              commit.getDestinationKey(), commit.getLength());
        });
    activeCommit.pendingsetCommitted(pendingSet.getIOStatistics());
  }
}
3.68
hadoop_DefaultAnonymizableDataType_needsAnonymization
// Determines if the contained data needs anonymization
protected boolean needsAnonymization(Configuration conf) {
  return true;
}
3.68
hbase_RpcServer_getCurrentServerCallWithCellScanner
/** * Just return the current rpc call if it is a {@link ServerCall} and also has {@link CellScanner} * attached. * <p/> * Mainly used for reference counting as {@link CellScanner} may reference non heap memory. */ public static Optional<ServerCall<?>> getCurrentServerCallWithCellScanner() { return getCurrentCall().filter(c -> c instanceof ServerCall) .filter(c -> c.getCellScanner() != null).map(c -> (ServerCall<?>) c); }
3.68
framework_MultiSelectionRenderer_doScrollAreaChecks
/**
 * This method checks whether the first pointer event started in an area
 * that would start scrolling immediately, and does some actions
 * accordingly.
 * <p>
 * If it is, that scroll area will be offset "beyond" the pointer (above
 * if pointer is towards the top, otherwise below).
 * <p>
 * <span style="font-size:smaller">*) This behavior will change in
 * future patches (henrik paul 2.7.2014)</span>
 */
private void doScrollAreaChecks(int pageY) {
    /*
     * The first run makes sure that neither scroll position is
     * underneath the finger, but offset to either direction from
     * underneath the pointer.
     */
    if (topBound == -1) {
        topBound = Math.min(finalTopBound, pageY);
        bottomBound = Math.max(finalBottomBound, pageY);
    } else {
        /*
         * Subsequent runs make sure that the scroll area grows (but
         * doesn't shrink) with the finger, but no further than the
         * final bound.
         */
        int oldTopBound = topBound;
        if (topBound < finalTopBound) {
            topBound = Math.max(topBound, Math.min(finalTopBound, pageY));
        }

        int oldBottomBound = bottomBound;
        if (bottomBound > finalBottomBound) {
            bottomBound = Math.min(bottomBound, Math.max(finalBottomBound, pageY));
        }

        final boolean topDidNotMove = oldTopBound == topBound;
        final boolean bottomDidNotMove = oldBottomBound == bottomBound;
        final boolean wasVerticalMovement = pageY != this.pageY;
        scrollAreaShouldRebound = (topDidNotMove && bottomDidNotMove && wasVerticalMovement);
    }
}
3.68
hudi_HoodieTableMetaClient_createNewInstantTime
/** * Returns next instant time in the correct format. * * @param shouldLock whether the lock should be enabled to get the instant time. */ public String createNewInstantTime(boolean shouldLock) { TimeGenerator timeGenerator = TimeGenerators .getTimeGenerator(timeGeneratorConfig, hadoopConf.get()); return HoodieActiveTimeline.createNewInstantTime(shouldLock, timeGenerator); }
3.68
hadoop_S3AReadOpContext_getVectoredIOContext
/** * Get the Vectored IO context for this read op. * @return vectored IO context. */ public VectoredIOContext getVectoredIOContext() { return vectoredIOContext; }
3.68
hbase_MetricsConnection_getMultiTracker
/** multiTracker metric */ public CallTracker getMultiTracker() { return multiTracker; }
3.68
framework_MethodInvocation_getLastOnlyTag
/** * Gets a String tag that is used to uniquely identify previous method * invocations that should be purged from the queue if * <code>{@literal @}Delay(lastOnly = true)</code> is used. * <p> * The returned string should contain at least one non-number char to ensure * it doesn't collide with the keys used for invocations without lastOnly. * * @return a string identifying this method invocation */ public String getLastOnlyTag() { return connectorId + "-" + getInterfaceName() + "-" + getMethodName(); }
3.68
hbase_VisibilityLabelsCache_getLabel
/** * @param ordinal The ordinal of label which we are looking for. * @return The label having the given ordinal. Returns <code>null</code> when no label exist in * the system with given ordinal */ @Override public String getLabel(int ordinal) { this.lock.readLock().lock(); try { return this.ordinalVsLabels.get(ordinal); } finally { this.lock.readLock().unlock(); } }
3.68
hbase_CryptoAES_wrap
/**
 * Encrypts input data. The result is composed of (msg, padding if needed, mac) and sequence num.
 *
 * @param data the input byte array
 * @param offset the offset in input where the input starts
 * @param len the input length
 * @return the new encrypted byte array.
 * @throws SaslException if error happens
 */
public byte[] wrap(byte[] data, int offset, int len) throws SaslException {
  // mac
  byte[] mac = integrity.getHMAC(data, offset, len);
  integrity.incMySeqNum();

  // encrypt
  byte[] encrypted = new byte[len + 10];
  try {
    int n = encryptor.update(data, offset, len, encrypted, 0);
    encryptor.update(mac, 0, 10, encrypted, n);
  } catch (ShortBufferException sbe) {
    // this should not happen
    throw new SaslException("Error happens during encrypt data", sbe);
  }

  // append seqNum used for mac
  byte[] wrapped = new byte[encrypted.length + 4];
  System.arraycopy(encrypted, 0, wrapped, 0, encrypted.length);
  System.arraycopy(integrity.getSeqNum(), 0, wrapped, encrypted.length, 4);
  return wrapped;
}
3.68
hadoop_ReconfigurableBase_startReconfigurationTask
/** * Start a reconfiguration task to reload configuration in background. * @throws IOException raised on errors performing I/O. */ public void startReconfigurationTask() throws IOException { synchronized (reconfigLock) { if (!shouldRun) { String errorMessage = "The server is stopped."; LOG.warn(errorMessage); throw new IOException(errorMessage); } if (reconfigThread != null) { String errorMessage = "Another reconfiguration task is running."; LOG.warn(errorMessage); throw new IOException(errorMessage); } reconfigThread = new ReconfigurationThread(this); reconfigThread.setDaemon(true); reconfigThread.setName("Reconfiguration Task"); reconfigThread.start(); startTime = Time.now(); } }
3.68
hbase_LruCachedBlockQueue_heapSize
/** * Total size of all elements in this queue. * @return size of all elements currently in queue, in bytes */ @Override public long heapSize() { return heapSize; }
3.68
hadoop_SnappyCodec_createDecompressor
/** * Create a new {@link Decompressor} for use by this {@link CompressionCodec}. * * @return a new decompressor for use by this codec */ @Override public Decompressor createDecompressor() { int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT); return new SnappyDecompressor(bufferSize); }
3.68
flink_UnsortedGrouping_maxBy
/**
 * Applies a special case of a reduce transformation (maxBy) on a grouped {@link DataSet}.
 *
 * <p>The transformation consecutively calls a {@link ReduceFunction} until only a single
 * element remains which is the result of the transformation. A ReduceFunction combines two
 * elements into one new element of the same type.
 *
 * @param fields Keys taken into account for finding the maximum.
 * @return A {@link ReduceOperator} representing the maximum.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public ReduceOperator<T> maxBy(int... fields) {
    // Check for using a tuple
    if (!this.inputDataSet.getType().isTupleType()
            || !(this.inputDataSet.getType() instanceof TupleTypeInfo)) {
        throw new InvalidProgramException("Method maxBy(int) only works on tuples.");
    }
    return new ReduceOperator<T>(
            this,
            new SelectByMaxFunction((TupleTypeInfo) this.inputDataSet.getType(), fields),
            Utils.getCallLocationName());
}
3.68
pulsar_TopicsImpl_validateTopic
/*
 * Returns the topic name with the encoded local name.
 */
private TopicName validateTopic(String topic) {
    // Parsing will throw exception if name is not valid
    return TopicName.get(topic);
}
3.68
hadoop_GlobPattern_matches
/** * Match input against the compiled glob pattern * @param s input chars * @return true for successful matches */ public boolean matches(CharSequence s) { return compiled.matcher(s).matches(); }
3.68
flink_Transformation_setBufferTimeout
/** * Set the buffer timeout of this {@code Transformation}. The timeout defines how long data may * linger in a partially full buffer before being sent over the network. * * <p>Lower timeouts lead to lower tail latencies, but may affect throughput. For Flink 1.5+, * timeouts of 1ms are feasible for jobs with high parallelism. * * <p>A value of -1 means that the default buffer timeout should be used. A value of zero * indicates that no buffering should happen, and all records/events should be immediately sent * through the network, without additional buffering. */ public void setBufferTimeout(long bufferTimeout) { checkArgument(bufferTimeout >= -1); this.bufferTimeout = bufferTimeout; }
3.68
framework_LayoutManager_reportOuterHeight
/** * Registers the outer height (including margins, borders and paddings) of a * component. This can be used as an optimization by ManagedLayouts; by * informing the LayoutManager about what size a component will have, the * layout propagation can continue directly without first measuring the * potentially resized elements. * * @param component * the component for which the size is reported * @param outerHeight * the new outer height (including margins, borders and paddings) * of the component in pixels */ public void reportOuterHeight(ComponentConnector component, int outerHeight) { Element element = component.getWidget().getElement(); MeasuredSize measuredSize = getMeasuredSize(element); if (isLayoutRunning()) { boolean heightChanged = measuredSize.setOuterHeight(outerHeight); if (heightChanged) { onConnectorChange(component, false, true); notifyListenersAndDepdendents(element, false, true); } currentDependencyTree.setNeedsVerticalMeasure(component, false); } else if (measuredSize.getOuterHeight() != outerHeight) { setNeedsMeasure(component); } }
3.68
hbase_UnsafeAccess_getAsInt
/** * Reads bytes at the given offset as an int value. * @return int value at offset */ private static int getAsInt(ByteBuffer buf, int offset) { if (buf.isDirect()) { return HBasePlatformDependent.getInt(directBufferAddress(buf) + offset); } return HBasePlatformDependent.getInt(buf.array(), BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset); }
3.68
incubator-hugegraph-toolchain_JDBCVendor_buildGetHeaderSql
/** * NOTE: don't add a semicolon(;) at the end of oracle sql */ @Override public String buildGetHeaderSql(JDBCSource source) { return String.format("SELECT COLUMN_NAME " + "FROM USER_TAB_COLUMNS " + "WHERE TABLE_NAME = %s " + "ORDER BY COLUMN_ID", this.escape(source.table())); }
3.68
morf_DrawIOGraphPrinter_generateColourForModule
/**
 * @param input
 * @return colour based on given input
 */
static Colour generateColourForModule(String input) {
  int i = input.hashCode();
  int r = i >> 16 & 0xFF;
  int g = i >> 8 & 0xFF;
  int b = i & 0xFF;

  Colour colour = new Colour();
  colour.hex = "#" + String.format("%02X", r) + String.format("%02X", g) + String.format("%02X", b);

  double luma = 0.2126 * r + 0.7152 * g + 0.0722 * b;
  // Runs from 0 - 255 - less than 128 = dark, so then use white text
  if (luma < 128) {
    colour.whiteText = true;
  }

  return colour;
}
3.68
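The luma test above is the standard Rec. 709 weighting of the RGB channels; a standalone sketch of just the contrast decision:

public class LumaContrast {
    static boolean useWhiteText(int r, int g, int b) {
        double luma = 0.2126 * r + 0.7152 * g + 0.0722 * b; // 0 (black) .. 255 (white)
        return luma < 128; // dark background -> white text
    }

    public static void main(String[] args) {
        System.out.println(useWhiteText(0x20, 0x20, 0x20)); // true, dark grey
        System.out.println(useWhiteText(0xF0, 0xF0, 0x80)); // false, pale yellow
    }
}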
hbase_Get_getMaxResultsPerColumnFamily
/** * Method for retrieving the get's maximum number of values to return per Column Family * @return the maximum number of values to fetch per CF */ public int getMaxResultsPerColumnFamily() { return this.storeLimit; }
3.68
zilla_HpackContext_staticIndex17
// Index in static table for the given name of length 17
private static int staticIndex17(DirectBuffer name) {
    switch (name.getByte(16)) {
    case 'e':
        if (STATIC_TABLE[40].name.equals(name)) // if-modified-since
        {
            return 40;
        }
        break;
    case 'g':
        if (STATIC_TABLE[57].name.equals(name)) // transfer-encoding
        {
            return 57;
        }
        break;
    }
    return -1;
}
3.68
hadoop_SaslParticipant_unwrap
/** * Unwraps a byte array. * * @param bytes The array containing the bytes to unwrap. * @param off The starting position in the array * @param len The number of bytes to unwrap * @return byte[] unwrapped bytes * @throws SaslException if the bytes cannot be successfully unwrapped */ public byte[] unwrap(byte[] bytes, int off, int len) throws SaslException { if (saslClient != null) { return saslClient.unwrap(bytes, off, len); } else { return saslServer.unwrap(bytes, off, len); } }
3.68