Columns:
name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
3.68
flink_OptimizerNode_getId
/** * Gets the ID of this node. If the id has not yet been set, this method returns -1. * * @return This node's id, or -1 if not yet set. */ public int getId() { return this.id; }
3.68
flink_PrimitiveArrayTypeInfo_getInfoFor
/**
 * Tries to get the PrimitiveArrayTypeInfo for an array. Returns null if the type is an array,
 * but the component type is not a primitive type.
 *
 * @param type The class of the array.
 * @return The corresponding PrimitiveArrayTypeInfo, or null if the array is not an array of
 *     primitives.
 * @throws InvalidTypesException Thrown if the given class does not represent an array.
 */
@SuppressWarnings("unchecked")
@PublicEvolving
public static <X> PrimitiveArrayTypeInfo<X> getInfoFor(Class<X> type) {
    if (!type.isArray()) {
        throw new InvalidTypesException("The given class is not an array.");
    }

    // basic type arrays
    return (PrimitiveArrayTypeInfo<X>) TYPES.get(type);
}
3.68
flink_MergeIterator_next
/**
 * Gets the next smallest element, with respect to the definition of order implied by the {@link
 * TypeSerializer} provided to this iterator.
 *
 * @return The next element if the iterator has another element, null otherwise.
 * @see org.apache.flink.util.MutableObjectIterator#next()
 */
@Override
public E next() throws IOException {
    if (this.heap.size() > 0) {
        // get the smallest element
        final HeadStream<E> top = this.heap.peek();
        E result = top.getHead();

        // read an element
        if (!top.nextHead()) {
            this.heap.poll();
        } else {
            this.heap.adjustTop();
        }
        return result;
    } else {
        return null;
    }
}
3.68
morf_SqlDialect_getSqlForMax
/** * Converts the max function into SQL. * * @param function the function details * @return a string representation of the SQL */ protected String getSqlForMax(Function function) { return "MAX(" + getSqlFrom(function.getArguments().get(0)) + ")"; }
3.68
hbase_SnapshotInfo_getMobStoreFilesCount
/** Returns the number of available store files in the mob dir */ public int getMobStoreFilesCount() { return hfilesMobCount.get(); }
3.68
hadoop_EmptyIOStatisticsContextImpl_getID
/** * The ID is always 0. * As the real context implementation counter starts at 1, * we are guaranteed to have unique IDs even between them and * the empty context. * @return 0 */ @Override public long getID() { return 0; }
3.68
flink_RocksDBNativeMetricOptions_enableCompactionPending
/** Enables the compaction-pending metric, which reports 1 if at least one compaction is pending and 0 otherwise. */ public void enableCompactionPending() { this.properties.add(RocksDBProperty.CompactionPending.getRocksDBProperty()); }
3.68
hadoop_ApplicationRowKey_getRowKeyAsString
/** * Constructs a row key for the application table as follows: * {@code clusterId!userName!flowName!flowRunId!AppId}. * @return String representation of row key. */ public String getRowKeyAsString() { return appRowKeyConverter.encodeAsString(this); }
3.68
flink_DynamicSinkUtils_projectColumnsForUpdate
// create a project that only selects the required columns or expressions for update
private static RelNode projectColumnsForUpdate(
        LogicalTableModify tableModify,
        int originColsCount,
        ResolvedSchema resolvedSchema,
        List<Integer> updatedIndexes,
        SupportsRowLevelUpdate.RowLevelUpdateMode updateMode,
        String tableDebugName,
        DataTypeFactory dataTypeFactory,
        FlinkTypeFactory typeFactory) {
    RexBuilder rexBuilder = tableModify.getCluster().getRexBuilder();
    // the updated columns, in the same order as in the user's update clause
    List<String> updatedColumnNames = tableModify.getUpdateColumnList();
    List<RexNode> newRexNodeList = new ArrayList<>();
    List<String> newFieldNames = new ArrayList<>();
    List<DataType> updateTargetDataTypes = new ArrayList<>();
    Project project = (Project) (tableModify.getInput());
    LogicalFilter filter = null;
    // if the update mode is all rows, we need to know the filter to rewrite
    // the update expression to IF(filter, updated_expr, col_expr)
    if (updateMode == SupportsRowLevelUpdate.RowLevelUpdateMode.ALL_ROWS
            && project.getInput() instanceof LogicalFilter) {
        filter = (LogicalFilter) project.getInput();
    }
    // the rex nodes for the project are like: index for all cols, update expressions for the
    // updated columns
    List<RexNode> oldRexNodes = project.getProjects();
    for (int index : updatedIndexes) {
        String colName = resolvedSchema.getColumnNames().get(index);
        // if the updated cols contain the col to be selected, the updated expression should
        // be in the project node
        if (updatedColumnNames.contains(colName)) {
            // get the index of the updated column in all updated columns
            int i = updatedColumnNames.indexOf(colName);
            // get the update expression
            RexNode rexNode = oldRexNodes.get(originColsCount + i);
            if (filter != null) {
                rexNode =
                        rexBuilder.makeCall(
                                FlinkSqlOperatorTable.IF,
                                Arrays.asList(
                                        filter.getCondition(),
                                        rexNode,
                                        rexBuilder.makeInputRef(project.getInput(), index)));
            }
            newRexNodeList.add(rexNode);
        } else {
            newRexNodeList.add(rexBuilder.makeInputRef(project.getInput(), index));
        }
        newFieldNames.add(colName);
        updateTargetDataTypes.add(resolvedSchema.getColumnDataTypes().get(index));
    }
    project =
            project.copy(
                    project.getTraitSet(),
                    // if filter is not null, we need to remove the filter in the plan since we
                    // have rewritten the expression to IF(filter, updated_expr, col_expr)
                    filter != null ? filter.getInput() : project.getInput(),
                    newRexNodeList,
                    RexUtil.createStructType(typeFactory, newRexNodeList, newFieldNames, null));
    return validateSchemaAndApplyImplicitCast(
            project, updateTargetDataTypes, tableDebugName, dataTypeFactory, typeFactory);
}
3.68
flink_PrioritizedDeque_getNumPriorityElements
/** Returns the current number of priority elements ([0; {@link #size()}]). */ public int getNumPriorityElements() { return numPriorityElements; }
3.68
hadoop_ExecutorServiceFuturePool_executeRunnable
/** * @param r runnable to run in future on executor pool * @return future * @throws java.util.concurrent.RejectedExecutionException if the task cannot be scheduled for execution * @throws NullPointerException if r param is null */ @SuppressWarnings("unchecked") public Future<Void> executeRunnable(final Runnable r) { return (Future<Void>) executor.submit(r::run); }
3.68
hbase_TableState_isInStates
/** * Checks whether the table is in one of the given states. * @param target states to check against * @return true if the current state equals any of the given states */ public boolean isInStates(State... target) { for (State tableState : target) { if (this.state.equals(tableState)) { return true; } } return false; }
3.68
flink_HybridSource_addSource
/** Add source with deferred instantiation based on previous enumerator. */ public <ToEnumT extends SplitEnumerator, NextSourceT extends Source<T, ?, ?>> HybridSourceBuilder<T, ToEnumT> addSource( SourceFactory<T, NextSourceT, ? super EnumT> sourceFactory, Boundedness boundedness) { if (!sources.isEmpty()) { Preconditions.checkArgument( Boundedness.BOUNDED.equals(sources.get(sources.size() - 1).boundedness), "All sources except the final source need to be bounded."); } ClosureCleaner.clean( sourceFactory, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true); sources.add(SourceListEntry.of(sourceFactory, boundedness)); return (HybridSourceBuilder) this; }
3.68
hadoop_IOStatisticsContextIntegration_getThreadSpecificIOStatisticsContext
/**
 * Get thread ID specific IOStatistics values if
 * statistics are enabled and the thread ID is in the map.
 * @param testThreadId thread ID.
 * @return IOStatisticsContext if found in the map.
 */
@VisibleForTesting
public static IOStatisticsContext getThreadSpecificIOStatisticsContext(long testThreadId) {
    LOG.debug("IOStatsContext thread ID required: {}", testThreadId);
    if (!isThreadIOStatsEnabled) {
        return null;
    }
    // lookup the weakRef IOStatisticsContext for the thread ID in the
    // ThreadMap.
    WeakReference<IOStatisticsContext> ioStatisticsSnapshotWeakReference =
        ACTIVE_IOSTATS_CONTEXT.lookup(testThreadId);
    if (ioStatisticsSnapshotWeakReference != null) {
        return ioStatisticsSnapshotWeakReference.get();
    }
    return null;
}
3.68
morf_AbstractSqlDialectTest_testPreInsertWithPresetAutonumStatementsInsertingUnderAutonumLimit
/** * Tests the SQL statements that are run before a data insert. */ @SuppressWarnings("unchecked") @Test public void testPreInsertWithPresetAutonumStatementsInsertingUnderAutonumLimit() { compareStatements( expectedPreInsertStatementsInsertingUnderAutonumLimit(), testDialect.preInsertWithPresetAutonumStatements(metadata.getTable(TEST_TABLE), true), testDialect.preInsertWithPresetAutonumStatements(metadata.getTable(AUTO_NUMBER_TABLE), false) ); }
3.68
hadoop_SnappyCodec_createCompressor
/** * Create a new {@link Compressor} for use by this {@link CompressionCodec}. * * @return a new compressor for use by this codec */ @Override public Compressor createCompressor() { int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT); return new SnappyCompressor(bufferSize); }
3.68
hadoop_JWTRedirectAuthenticationHandler_getJWTFromCookie
/** * Encapsulate the acquisition of the JWT token from HTTP cookies within the * request. * * @param req servlet request to get the JWT token from * @return serialized JWT token */ protected String getJWTFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (cookieName.equals(cookie.getName())) { LOG.info(cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; }
3.68
hadoop_IrqHandler_getName
/** * @return the signal name. */ public String getName() { return name; }
3.68
AreaShop_RentRegion_getFormattedInactiveTimeUntilUnrent
/** * Get a human readable string indicating how long the player can be offline until automatic unrent. * @return String indicating the inactive time until unrent */ public String getFormattedInactiveTimeUntilUnrent() { return Utils.millisToHumanFormat(getInactiveTimeUntilUnrent()); }
3.68
framework_AbsoluteLayoutConnector_updateCaption
/* * (non-Javadoc) * * @see com.vaadin.client.HasComponentsConnector#updateCaption(com.vaadin * .client.ComponentConnector) */ @Override public void updateCaption(ComponentConnector childConnector) { VAbsoluteLayout absoluteLayoutWidget = getWidget(); boolean captionIsNeeded = VCaption.isNeeded(childConnector); VCaption caption = absoluteLayoutWidget .getWidgetCaption(childConnector.getWidget()); if (captionIsNeeded) { if (caption == null) { caption = new VCaption(childConnector, getConnection()); } absoluteLayoutWidget.setWidgetCaption(childConnector.getWidget(), caption); } else if (caption != null) { absoluteLayoutWidget.setWidgetCaption(childConnector.getWidget(), null); } }
3.68
hadoop_TFile_advance
/**
 * Move the cursor to the next key-value pair. The entry returned by the
 * previous entry() call will be invalid.
 *
 * @return true if the cursor successfully moves. False when cursor is
 *         already at the end location and cannot be advanced.
 * @throws IOException raised on errors performing I/O.
 */
public boolean advance() throws IOException {
    if (atEnd()) {
        return false;
    }

    int curBid = currentLocation.getBlockIndex();
    long curRid = currentLocation.getRecordIndex();
    long entriesInBlock = reader.getBlockEntryCount(curBid);
    if (curRid + 1 >= entriesInBlock) {
        if (endLocation.compareTo(curBid + 1, 0) <= 0) {
            // last entry in TFile.
            parkCursorAtEnd();
        } else {
            // last entry in Block.
            initBlock(curBid + 1);
        }
    } else {
        inBlockAdvance(1);
    }
    return true;
}
3.68
framework_VTabsheetPanel_getVisibleWidget
/** * Gets the index of the currently-visible widget. * * @return the visible widget's index */ public int getVisibleWidget() { return getWidgetIndex(visibleWidget); }
3.68
hbase_MetricsConnection_updateRpcGeneric
/** Update call stats for non-critical-path methods */ private void updateRpcGeneric(String methodName, CallStats stats) { getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory).update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS); getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory) .update(stats.getRequestSizeBytes()); getMetric(RESP_BASE + methodName, rpcHistograms, histogramFactory) .update(stats.getResponseSizeBytes()); }
3.68
shardingsphere-elasticjob_ExecutionService_misfireIfHasRunningItems
/** * Set misfire flag if sharding items are still running. * * @param items sharding items that need the misfire flag set * @return whether misfire is set for this schedule time */ public boolean misfireIfHasRunningItems(final Collection<Integer> items) { if (!hasRunningItems(items)) { return false; } setMisfire(items); return true; }
3.68
flink_HiveSourceBuilder_setProjectedFields
/** * Sets the indices of projected fields. * * @param projectedFields indices of the fields, starting from 0 */ public HiveSourceBuilder setProjectedFields(int[] projectedFields) { this.projectedFields = projectedFields; return this; }
3.68
hadoop_StripedReconstructor_initDecodingValidatorIfNecessary
// Initialize decoding validator
protected void initDecodingValidatorIfNecessary() {
    if (isValidationEnabled && validator == null) {
        validator = new DecodingValidator(decoder);
    }
}
3.68
framework_DefaultSQLGenerator_generateUpdateQuery
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.addon.sqlcontainer.query.generator.SQLGenerator#
 * generateUpdateQuery(java.lang.String,
 * com.vaadin.addon.sqlcontainer.RowItem)
 */
@Override
public StatementHelper generateUpdateQuery(String tableName, RowItem item) {
    if (tableName == null || tableName.trim().equals("")) {
        throw new IllegalArgumentException("Table name must be given.");
    }
    if (item == null) {
        throw new IllegalArgumentException("Updated item must be given.");
    }

    StatementHelper sh = getStatementHelper();
    StringBuilder query = new StringBuilder();
    query.append("UPDATE ").append(tableName).append(" SET");

    /* Generate column<->value and rowidentifiers map */
    Map<String, Object> columnToValueMap = generateColumnToValueMap(item);
    Map<String, Object> rowIdentifiers = generateRowIdentifiers(item);

    /* Generate columns and values to update */
    boolean first = true;
    for (String column : columnToValueMap.keySet()) {
        if (first) {
            query.append(" " + QueryBuilder.quote(column) + " = ?");
        } else {
            query.append(", " + QueryBuilder.quote(column) + " = ?");
        }
        sh.addParameterValue(columnToValueMap.get(column),
                item.getItemProperty(column).getType());
        first = false;
    }

    /* Generate identifiers for the row to be updated */
    first = true;
    for (String column : rowIdentifiers.keySet()) {
        if (first) {
            query.append(" WHERE " + QueryBuilder.quote(column) + " = ?");
        } else {
            query.append(" AND " + QueryBuilder.quote(column) + " = ?");
        }
        sh.addParameterValue(rowIdentifiers.get(column),
                item.getItemProperty(column).getType());
        first = false;
    }
    sh.setQueryString(query.toString());
    return sh;
}
3.68
hadoop_ServiceRecord_getExternalEndpoint
/** * Look up an external endpoint * @param api API * @return the endpoint or null if there was no match */ public Endpoint getExternalEndpoint(String api) { return findByAPI(external, api); }
3.68
hadoop_FederationStateStoreFacade_getDelegationTokenSeqNum
/** * Get SeqNum from stateStore. * * @return delegationTokenSequenceNumber. */ public int getDelegationTokenSeqNum() { return stateStore.getDelegationTokenSeqNum(); }
3.68
hmily_HmilyHashLoadBalance_select
/**
 * Use load balancing to select invoker.
 *
 * @param invocation invocation
 * @return Invoker
 * @throws NoInvokerException NoInvokerException
 */
public Invoker<T> select(final InvokeContext invocation) throws NoInvokerException {
    long hash = Math.abs(StringUtils.convertLong(invocation.getAttachment(Constants.TARS_HASH), 0));
    List<Invoker<T>> staticWeightInvokers = staticWeightInvokersCache;
    if (staticWeightInvokers != null && !staticWeightInvokers.isEmpty()) {
        Invoker<T> invoker = staticWeightInvokers.get((int) (hash % staticWeightInvokers.size()));
        if (invoker.isAvailable()) {
            return invoker;
        }
        ServantInvokerAliveStat stat = ServantInvokerAliveChecker.get(invoker.getUrl());
        if (stat.isAlive() || (stat.getLastRetryTime() + (config.getTryTimeInterval() * 1000)) < System.currentTimeMillis()) {
            LOGGER.info("try to use inactive invoker|" + invoker.getUrl().toIdentityString());
            stat.setLastRetryTime(System.currentTimeMillis());
            return invoker;
        }
    }
    List<Invoker<T>> sortedInvokers = sortedInvokersCache;
    if (sortedInvokers == null || sortedInvokers.isEmpty()) {
        throw new NoInvokerException("no such active connection invoker");
    }
    List<Invoker<T>> list = new ArrayList<Invoker<T>>();
    for (Invoker<T> invoker : sortedInvokers) {
        if (!invoker.isAvailable()) {
            ServantInvokerAliveStat stat = ServantInvokerAliveChecker.get(invoker.getUrl());
            if (stat.isAlive() || (stat.getLastRetryTime() + (config.getTryTimeInterval() * 1000)) < System.currentTimeMillis()) {
                list.add(invoker);
            }
        } else {
            list.add(invoker);
        }
    }
    // TODO: when no invoker is available, consider whether to pick one at random
    if (list.isEmpty()) {
        throw new NoInvokerException(config.getSimpleObjectName() + " try to select active invoker, size="
                + sortedInvokers.size() + ", no such active connection invoker");
    }
    Invoker<T> invoker = list.get((int) (hash % list.size()));
    if (!invoker.isAvailable()) {
        LOGGER.info("try to use inactive invoker|" + invoker.getUrl().toIdentityString());
        ServantInvokerAliveChecker.get(invoker.getUrl()).setLastRetryTime(System.currentTimeMillis());
    }
    return HmilyLoadBalanceUtils.doSelect(invoker, sortedInvokersCache);
}
3.68
hbase_SnapshotManifest_getSnapshotDescription
/** * Get the SnapshotDescription */ public SnapshotDescription getSnapshotDescription() { return this.desc; }
3.68
hadoop_LocalityMulticastAMRMProxyPolicy_addAnyRR
/** * Add an ANY request to the final answer. */ private void addAnyRR(SubClusterId targetId, ResourceRequest rr) { Preconditions .checkArgument(ResourceRequest.isAnyLocation(rr.getResourceName())); internalAddToAnswer(targetId, rr, false); }
3.68
hadoop_RegistryPathUtils_parentOf
/** * Get the parent of a path * @param path path to look at * @return the parent path * @throws PathNotFoundException if the path was at root. */ public static String parentOf(String path) throws PathNotFoundException { List<String> elements = split(path); int size = elements.size(); if (size == 0) { throw new PathNotFoundException("No parent of " + path); } if (size == 1) { return "/"; } elements.remove(size - 1); StringBuilder parent = new StringBuilder(path.length()); for (String element : elements) { parent.append("/"); parent.append(element); } return parent.toString(); }
3.68
framework_VFilterSelect_isJustClosed
/** * Was the popup just closed? * * @return true if popup was just closed */ public boolean isJustClosed() { debug("VFS.SP: justClosed()"); final long now = (new Date()).getTime(); return (lastAutoClosed > 0 && (now - lastAutoClosed) < 200); }
3.68
streampipes_ElasticsearchApiCallBridge_cleanup
/**
 * Perform any necessary state cleanup.
 */
public void cleanup() {
    // nothing to cleanup
}
3.68
hudi_HoodieTableFileSystemView_fetchAllStoredFileGroups
/** * Given a partition path, obtain all filegroups within that. All methods that work at the partition level go through this. */ @Override Stream<HoodieFileGroup> fetchAllStoredFileGroups(String partition) { final List<HoodieFileGroup> fileGroups = new ArrayList<>(partitionToFileGroupsMap.get(partition)); return fileGroups.stream(); }
3.68
framework_Upload_removeChangeListener
/** * Removes a filename change event listener. * * @param listener * the listener to be removed */ @Deprecated public void removeChangeListener(ChangeListener listener) { super.removeListener(EventId.CHANGE, ChangeEvent.class, listener); }
3.68
hbase_HBaseRpcController_getTableName
/** Returns Region's table name or null if not available or pertinent. */ default TableName getTableName() { return null; }
3.68
flink_CheckedThread_trySync
/** * Waits with timeout until the thread is completed and checks whether any error occurred during * the execution. * * <p>This method blocks like {@link #join(long)}, but performs an additional check for exceptions * thrown from the {@link #go()} method. * * @param timeout the maximum wait time in milliseconds */ public void trySync(long timeout) throws Exception { join(timeout); checkError(); }
3.68
hadoop_TimelineEvents_setEvents
/** * Set the event list to the given list of events * * @param events * a list of events */ public void setEvents(List<TimelineEvent> events) { this.events = events; }
3.68
hbase_MobFile_readCell
/** * Reads a cell from the mob file. * @param search The cell to be searched for in the mob file. * @param cacheMobBlocks Should this scanner cache blocks. * @param readPt the read point. * @return The cell in the mob file. */ public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException { StoreFileScanner scanner = null; boolean succ = false; try { List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles( Collections.singletonList(sf), cacheMobBlocks, true, false, false, readPt); if (!sfScanners.isEmpty()) { scanner = sfScanners.get(0); if (scanner.seek(search)) { MobCell mobCell = new MobCell(scanner.peek(), scanner); succ = true; return mobCell; } } return null; } finally { if (scanner != null && !succ) { scanner.close(); } } }
3.68
hadoop_AzureNativeFileSystemStore_acquireLease
/**
 * Get a lease on the blob identified by key. This lease will be renewed
 * indefinitely by a background thread.
 */
@Override
public SelfRenewingLease acquireLease(String key) throws AzureException {
    LOG.debug("acquiring lease on {}", key);
    try {
        checkContainer(ContainerAccessType.ReadThenWrite);
        CloudBlobWrapper blob = getBlobReference(key);
        return blob.acquireLease();
    } catch (Exception e) {
        // Caught exception while attempting to get lease. Re-throw as an
        // Azure storage exception.
        throw new AzureException(e);
    }
}
3.68
hadoop_AzureNativeFileSystemStore_isConcurrentOOBAppendAllowed
/** * Check if concurrent reads and writes on the same blob are allowed. * * @return true if concurrent reads and OOB writes have been configured, false * otherwise. */ private boolean isConcurrentOOBAppendAllowed() { return tolerateOobAppends; }
3.68
framework_AbstractSelect_isMultiSelect
/** * Is the select in multiselect mode? In multiselect mode, multiple items can be selected simultaneously. * * @return the value of property multiSelect. */ public boolean isMultiSelect() { return multiSelect; }
3.68
framework_DesignAttributeHandler_findGetterForAttribute
/** * Returns a getter that can be used for reading the given design attribute * value from the class * * @param clazz * the class that is scanned for getters * @param attribute * the design attribute to find getter for * @return the getter method or null if not found */ private static Method findGetterForAttribute(Class<?> clazz, String attribute) { resolveSupportedAttributes(clazz); return CACHE.get(clazz).getGetter(attribute); }
3.68
hbase_RSGroupInfo_getTables
/** * Get set of tables that are members of the group. * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in * the configuration of a table so this will be removed. */ @Deprecated public SortedSet<TableName> getTables() { return tables; }
3.68
pulsar_KubernetesFunctionAuthProvider_initialize
/** * @deprecated use * {@link #initialize(CoreV1Api, byte[], java.util.function.Function, Map)} */ @Deprecated(since = "3.0.0") default void initialize(CoreV1Api coreClient, byte[] caBytes, java.util.function.Function<Function.FunctionDetails, String> namespaceCustomizerFunc) { setCaBytes(caBytes); setNamespaceProviderFunc(namespaceCustomizerFunc); initialize(coreClient); }
3.68
hadoop_ApplicationRowKey_parseRowKeyFromString
/** * Given the encoded row key as string, returns the row key as an object. * @param encodedRowKey String representation of row key. * @return A <cite>ApplicationRowKey</cite> object. */ public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) { return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey); }
3.68
framework_ColorUtil_stringToColor
/** * Parses {@link Color} from any of the following {@link String} inputs: <br> * - RGB hex (e.g. "#FFAA00"), {@link #HEX_PATTERN}<br> * - RGB "function" (e.g. "rgb(128,0,255)"), {@link #RGB_PATTERN}<br> * - RGBA "function" (e.g. "rgba(50,50,50,0.2)"), {@link #RGBA_PATTERN}<br> * - HSL "function" (e.g. "hsl(50,50,50)"), {@link #HSL_PATTERN}<br> * - HSLA "function" (e.g. "hsla(50,50,50,0.2)"), {@link #HSLA_PATTERN} * <p> * Parsing is case-insensitive. * * @param input String input * @return {@link Color} parsed from input * @throws NumberFormatException Input does not match any recognized pattern */ public static Color stringToColor(String input) { Matcher m = HEX_PATTERN.matcher(input); if (m.matches()) { return getHexPatternColor(m); } m = RGB_PATTERN.matcher(input); if (m.matches()) { return getRGBPatternColor(m); } m = RGBA_PATTERN.matcher(input); if (m.matches()) { return getRGBAPatternColor(m); } m = HSL_PATTERN.matcher(input); if (m.matches()) { return getHSLPatternColor(m); } m = HSLA_PATTERN.matcher(input); if (m.matches()) { return getHSLAPatternColor(m); } throw new NumberFormatException("Parsing color from input failed."); }
3.68
hmily_Binder_getSource
/** * Gets source. * * @return the source */ ConfigPropertySource getSource() { if (source == null) { return Binder.this.source; } return source; }
3.68
querydsl_BeanPath_createTime
/** * Create a new Time path * * @param <A> * @param property property name * @param type property type * @return property path */ @SuppressWarnings("unchecked") protected <A extends Comparable> TimePath<A> createTime(String property, Class<? super A> type) { return add(new TimePath<A>((Class) type, forProperty(property))); }
3.68
hadoop_ContainerContext_getContainerType
/** * Gets the {@link ContainerType}, i.e. the type of the container * being initialized or stopped. * * @return the type of the container */ public ContainerType getContainerType() { return containerType; }
3.68
querydsl_GeometryExpression_eq
/* (non-Javadoc) * @see com.querydsl.core.types.dsl.SimpleExpression#eq(com.querydsl.core.types.Expression) */ @Override public BooleanExpression eq(Expression<? super T> right) { return Expressions.booleanOperation(SpatialOps.EQUALS, mixin, right); }
3.68
hbase_NettyUnsafeUtils_getTotalPendingOutboundBytes
/**
 * Get total bytes pending write to socket
 */
public static long getTotalPendingOutboundBytes(Channel channel) {
    ChannelOutboundBuffer outboundBuffer = channel.unsafe().outboundBuffer();
    // can be null when the channel is closing
    if (outboundBuffer == null) {
        return 0;
    }
    return outboundBuffer.totalPendingWriteBytes();
}
3.68
framework_LegacyLocatorStrategy_getElementByPath
/** * {@inheritDoc} */ @Override public Element getElementByPath(String path) { return getElementByPathStartingAt(path, null); }
3.68
querydsl_ExpressionUtils_notInAny
/** * Create a {@code left not in right and...} expression for each list * * @param <D> * @param left * @param lists * @return a {@code left not in right and...} expression */ public static <D> Predicate notInAny(Expression<D> left, Iterable<? extends Collection<? extends D>> lists) { BooleanBuilder rv = new BooleanBuilder(); for (Collection<? extends D> list : lists) { rv.and(notIn(left, list)); } return rv; }
3.68
hbase_SnapshotManifest_getSnapshotFormat
/* * Return the snapshot format */ private static int getSnapshotFormat(final SnapshotDescription desc) { return desc.hasVersion() ? desc.getVersion() : SnapshotManifestV1.DESCRIPTOR_VERSION; }
3.68
shardingsphere-elasticjob_SetUpFacade_registerStartUpInfo
/** * Register start up info. * * @param enabled enable job on startup */ public void registerStartUpInfo(final boolean enabled) { listenerManager.startAllListeners(); leaderService.electLeader(); serverService.persistOnline(enabled); instanceService.persistOnline(); if (!reconcileService.isRunning()) { reconcileService.startAsync(); } serverService.removeOfflineServers(); }
3.68
hadoop_PlacementConstraintManagerService_validateSourceTags
/** * Validates whether the allocation tags that will enable a constraint have * the expected format. At the moment we support a single allocation tag per * constraint. * * @param sourceTags the source allocation tags * @return true if the tags have the expected format */ protected boolean validateSourceTags(Set<String> sourceTags) { if (sourceTags.isEmpty()) { LOG.warn("A placement constraint cannot be associated with an empty " + "set of tags."); return false; } if (sourceTags.size() > 1) { LOG.warn("Only a single tag can be associated with a placement " + "constraint currently."); return false; } return true; }
3.68
hbase_HBaseTestingUtility_await
/** * Await the successful return of {@code condition}, sleeping {@code sleepMillis} between * invocations. */ public static void await(final long sleepMillis, final BooleanSupplier condition) throws InterruptedException { try { while (!condition.getAsBoolean()) { Thread.sleep(sleepMillis); } } catch (RuntimeException e) { if (e.getCause() instanceof AssertionError) { throw (AssertionError) e.getCause(); } throw e; } }
3.68
framework_ExpandingContainer_removeContainerProperty
/** * @throws UnsupportedOperationException * always */ @Override public boolean removeContainerProperty(Object propertyId) { throw new UnsupportedOperationException(); }
3.68
querydsl_Expressions_asComparable
/** * Create a new ComparableExpression * * @param value Comparable * @return new ComparableExpression */ public static <T extends Comparable<?>> ComparableExpression<T> asComparable(T value) { return asComparable(constant(value)); }
3.68
framework_DesignContext_removeComponentCreationListener
/** * Removes a component creation listener. * * @param listener * the component creation listener to be removed * @deprecated Use a {@link Registration} object returned by * {@link #addComponentCreationListener(ComponentCreationListener)} * to remove a listener */ @Deprecated public void removeComponentCreationListener( ComponentCreationListener listener) { listeners.remove(listener); }
3.68
zxing_AztecCode_isCompact
/** * @return {@code true} if compact instead of full mode */ public boolean isCompact() { return compact; }
3.68
hbase_FirstKeyOnlyFilter_parseFrom
/**
 * Parse a serialized representation of {@link FirstKeyOnlyFilter}
 * @param pbBytes A pb serialized {@link FirstKeyOnlyFilter} instance
 * @return An instance of {@link FirstKeyOnlyFilter} made from <code>bytes</code>
 * @throws DeserializationException if an error occurred
 * @see #toByteArray
 */
public static FirstKeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
    // There is nothing to deserialize. Why do this at all?
    try {
        FilterProtos.FirstKeyOnlyFilter.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
        throw new DeserializationException(e);
    }
    // Just return a new instance.
    return new FirstKeyOnlyFilter();
}
3.68
hbase_MasterFileSystem_checkTempDir
/**
 * Make sure the hbase temp directory exists and is empty. NOTE that this method is only executed
 * once just after the master becomes the active one.
 */
void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
    // If the temp directory exists, clear the content (left over, from the previous run)
    if (fs.exists(tmpdir)) {
        // Archive table in temp, maybe left over from failed deletion,
        // if not the cleaner will take care of them.
        for (Path tableDir : FSUtils.getTableDirs(fs, tmpdir)) {
            HFileArchiver.archiveRegions(c, fs, this.rootdir, tableDir,
                FSUtils.getRegionDirs(fs, tableDir));
            if (!FSUtils.getRegionDirs(fs, tableDir).isEmpty()) {
                LOG.warn("Found regions in tmp dir after archiving table regions, {}", tableDir);
            }
        }
        // if acl sync to hdfs is enabled, then skip delete tmp dir because ACLs are set
        if (!SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(c) && !fs.delete(tmpdir, true)) {
            throw new IOException("Unable to clean the temp directory: " + tmpdir);
        }
    }

    // Create the temp directory
    if (!fs.exists(tmpdir)) {
        if (isSecurityEnabled) {
            if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
                throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
            }
        } else {
            if (!fs.mkdirs(tmpdir)) {
                throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
            }
        }
    }
}
3.68
dubbo_ServiceAnnotationPostProcessor_findServiceBeanDefinitionHolders
/** * Finds a {@link Set} of {@link BeanDefinitionHolder BeanDefinitionHolders} whose bean type is annotated with the * {@link Service} annotation. * * @param scanner {@link ClassPathBeanDefinitionScanner} * @param packageToScan package to scan * @param registry {@link BeanDefinitionRegistry} * @return non-null * @since 2.5.8 */ private Set<BeanDefinitionHolder> findServiceBeanDefinitionHolders( ClassPathBeanDefinitionScanner scanner, String packageToScan, BeanDefinitionRegistry registry, BeanNameGenerator beanNameGenerator) { Set<BeanDefinition> beanDefinitions = scanner.findCandidateComponents(packageToScan); Set<BeanDefinitionHolder> beanDefinitionHolders = new LinkedHashSet<>(beanDefinitions.size()); for (BeanDefinition beanDefinition : beanDefinitions) { String beanName = beanNameGenerator.generateBeanName(beanDefinition, registry); BeanDefinitionHolder beanDefinitionHolder = new BeanDefinitionHolder(beanDefinition, beanName); beanDefinitionHolders.add(beanDefinitionHolder); } return beanDefinitionHolders; }
3.68
hadoop_PendingSet_load
/** * Load an instance from a file, then validate it. * @param fs filesystem * @param path path * @param status status of file to load * @return the loaded instance * @throws IOException IO failure * @throws ValidationFailure if the data is invalid */ public static PendingSet load(FileSystem fs, Path path, @Nullable FileStatus status) throws IOException { LOG.debug("Reading pending commits in file {}", path); PendingSet instance = serializer().load(fs, path, status); instance.validate(); return instance; }
3.68
hmily_TarsHmilyConfiguration_hmilyCommunicatorBeanPostProcessor
/** * add HmilyCommunicatorBeanPostProcessor. * * @param communicator communicator * @return HmilyCommunicatorBeanPostProcessor */ @Bean public TarsHmilyCommunicatorBeanPostProcessor hmilyCommunicatorBeanPostProcessor(final Communicator communicator) { return new TarsHmilyCommunicatorBeanPostProcessor(communicator); }
3.68
morf_AbstractSqlDialectTest_testUpdateWithSelectMinimum
/** * Tests update SQL using a select minimum. */ @Test public void testUpdateWithSelectMinimum() { SelectStatement stmt = new SelectStatement(min(new FieldReference(INT_FIELD))) .from(new TableReference(TEST_TABLE).as("T")) .where(and( eq(new FieldReference(new TableReference("T"), CHAR_FIELD), new FieldLiteral("S")), eq(new FieldReference(new TableReference("T"), STRING_FIELD), new FieldReference(new TableReference("O"), STRING_FIELD)), eq(new FieldReference(new TableReference("T"), INT_FIELD), new FieldReference(new TableReference("O"), INT_FIELD)) ) ); UpdateStatement updateStmt = new UpdateStatement(new TableReference(OTHER_TABLE).as("O")) .set(new FieldFromSelect(stmt).as(INT_FIELD)) .where(eq(new FieldReference(STRING_FIELD), new FieldLiteral("Y"))); assertEquals("Update scripts are not the same", expectedUpdateWithSelectMinimum(), testDialect.convertStatementToSQL(updateStmt)); }
3.68
hadoop_AbfsStatistic_getStatName
/** * Getter for statistic name. * * @return Name of statistic. */ public String getStatName() { return statName; }
3.68
hbase_ReplicationSourceManager_addSource
/**
 * Add a normal source for the given peer on this region server. Meanwhile, add new replication
 * queue to storage. For the newly added peer, we only need to enqueue the latest log of each wal
 * group and do replication.
 * <p/>
 * We add a {@code init} parameter to indicate whether this is part of the initialization process.
 * If so, we should skip adding the replication queues as this may introduce a deadlock on region
 * server start up and hbase:replication table online.
 * @param peerId the id of the replication peer
 * @param init whether this call is part of the initialization process
 * @return the source that was created
 */
void addSource(String peerId, boolean init) throws IOException {
    ReplicationPeer peer = replicationPeers.getPeer(peerId);
    if (
        ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME
            .equals(peer.getPeerConfig().getReplicationEndpointImpl())
    ) {
        // we do not use this endpoint for region replication any more, see HBASE-26233
        LOG.info("Legacy region replication peer found, skip adding: {}", peer.getPeerConfig());
        return;
    }
    ReplicationQueueId queueId = new ReplicationQueueId(server.getServerName(), peerId);
    ReplicationSourceInterface src =
        createSource(new ReplicationQueueData(queueId, ImmutableMap.of()), peer);
    // synchronized on latestPaths to avoid missing the new log
    synchronized (this.latestPaths) {
        this.sources.put(peerId, src);
        Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
        this.walsById.put(queueId, walsByGroup);
        // Add the latest wal to that source's queue
        if (!latestPaths.isEmpty()) {
            for (Map.Entry<String, Path> walPrefixAndPath : latestPaths.entrySet()) {
                Path walPath = walPrefixAndPath.getValue();
                NavigableSet<String> wals = new TreeSet<>();
                wals.add(walPath.getName());
                walsByGroup.put(walPrefixAndPath.getKey(), wals);
                if (!init) {
                    // Abort RS and throw exception to make adding the peer fail
                    // Ideally we'd better use the current file size as offset so we can skip
                    // replicating the data before adding replication peer, but the problem is that
                    // the file may not end at a valid entry's ending, and the current WAL Reader
                    // implementation can not deal with reading from the middle of a WAL entry.
                    // Can improve later.
                    abortAndThrowIOExceptionWhenFail(
                        () -> this.queueStorage.setOffset(queueId, walPrefixAndPath.getKey(),
                            new ReplicationGroupOffset(walPath.getName(), 0), Collections.emptyMap()));
                }
                src.enqueueLog(walPath);
                LOG.trace("Enqueued {} to source {} during source creation.", walPath, src.getQueueId());
            }
        }
    }
    ReplicationPeerConfig peerConfig = peer.getPeerConfig();
    if (peerConfig.isSyncReplication()) {
        syncReplicationPeerMappingManager.add(peer.getId(), peerConfig);
    }
    src.startup();
}
3.68
hudi_SpillableMapUtils_computePayloadSize
/** * Compute a bytes representation of the payload by serializing the contents. This is used to estimate the size of the * payload (either in memory or when written to disk). */ public static <R> long computePayloadSize(R value, SizeEstimator<R> valueSizeEstimator) throws IOException { return valueSizeEstimator.sizeEstimate(value); }
3.68
hbase_BufferedMutatorParams_getWriteBufferPeriodicFlushTimerTickMs
/** * @deprecated Since 3.0.0, will be removed in 4.0.0. We use a common timer in the whole client * implementation so you can not set it any more. */ @Deprecated public long getWriteBufferPeriodicFlushTimerTickMs() { return writeBufferPeriodicFlushTimerTickMs; }
3.68
hudi_BaseFileUtils_readMinMaxRecordKeys
/** * Read the min and max record key from the metadata of the given data file. * @param configuration Configuration * @param filePath The data file path * @return An array of two strings where the first is the min record key and the second is the max record key */ public String[] readMinMaxRecordKeys(Configuration configuration, Path filePath) { Map<String, String> minMaxKeys = readFooter(configuration, true, filePath, HoodieBloomFilterWriteSupport.HOODIE_MIN_RECORD_KEY_FOOTER, HoodieBloomFilterWriteSupport.HOODIE_MAX_RECORD_KEY_FOOTER); if (minMaxKeys.size() != 2) { throw new HoodieException( String.format("Could not read min/max record key out of footer correctly from %s. Read: %s", filePath, minMaxKeys)); } return new String[] {minMaxKeys.get(HoodieBloomFilterWriteSupport.HOODIE_MIN_RECORD_KEY_FOOTER), minMaxKeys.get(HoodieBloomFilterWriteSupport.HOODIE_MAX_RECORD_KEY_FOOTER)}; }
3.68
framework_ContainerOrderedWrapper_removePropertySetChangeListener
/* * Removes a Property set change listener from the object. Don't add a * JavaDoc comment here, we use the default documentation from implemented * interface. */ @Override public void removePropertySetChangeListener( Container.PropertySetChangeListener listener) { if (container instanceof Container.PropertySetChangeNotifier) { ((Container.PropertySetChangeNotifier) container) .removePropertySetChangeListener( new PiggybackListener(listener)); } }
3.68
hadoop_ListResultEntrySchema_withGroup
/** * Set the group value. * * @param group the group value to set * @return the ListResultEntrySchema object itself. */ public ListResultEntrySchema withGroup(final String group) { this.group = group; return this; }
3.68
hadoop_YarnVersionInfo_getUrl
/** * Get the subversion URL for the root YARN directory. * * @return URL for the root YARN directory. */ public static String getUrl() { return YARN_VERSION_INFO._getUrl(); }
3.68
hadoop_FilePosition_blockNumber
/** * Gets the id of the current block. * * @return the id of the current block. */ public int blockNumber() { throwIfInvalidBuffer(); return blockData.getBlockNumber(bufferStartOffset); }
3.68
framework_ApplicationConnection_analyzeLayouts
/** * Requests an analyze of layouts, to find inconsistencies. Exclusively used * for debugging during development. * * @deprecated as of 7.1. Replaced by {@link UIConnector#analyzeLayouts()} */ @Deprecated public void analyzeLayouts() { getUIConnector().analyzeLayouts(); }
3.68
hbase_CellBuilderFactory_create
/** * Create a CellBuilder instance. * @param type indicates which memory copy is used in building cell. * @return A new CellBuilder */ public static CellBuilder create(CellBuilderType type) { switch (type) { case SHALLOW_COPY: return new IndividualBytesFieldCellBuilder(); case DEEP_COPY: return new KeyValueBuilder(); default: throw new UnsupportedOperationException("The type:" + type + " is unsupported"); } }
3.68
morf_DefaultAdditionalSchemaDataImpl_columnDefaultValue
/** * @see org.alfasoftware.morf.excel.AdditionalSchemaData#columnDefaultValue(Table, java.lang.String) */ @Override public String columnDefaultValue(Table table, String columnName) { return StringUtils.EMPTY; }
3.68
framework_Table_isColumnReorderingAllowed
/** * Checks if column reordering is allowed. * * @return true if columns can be reordered; false otherwise. */ public boolean isColumnReorderingAllowed() { return columnReorderingAllowed; }
3.68
dubbo_DefaultFuture_closeChannel
/**
 * Close a channel when it is inactive, and directly
 * return the unfinished requests.
 *
 * @param channel channel to close
 * @param timeout maximum time in milliseconds to wait for in-flight responses
 */
public static void closeChannel(Channel channel, long timeout) {
    long deadline = timeout > 0 ? System.currentTimeMillis() + timeout : 0;
    for (Map.Entry<Long, Channel> entry : CHANNELS.entrySet()) {
        if (channel.equals(entry.getValue())) {
            DefaultFuture future = getFuture(entry.getKey());
            if (future != null && !future.isDone()) {
                long restTime = deadline - System.currentTimeMillis();
                if (restTime > 0) {
                    try {
                        future.get(restTime, TimeUnit.MILLISECONDS);
                    } catch (java.util.concurrent.TimeoutException ignore) {
                        logger.warn(
                                PROTOCOL_TIMEOUT_SERVER,
                                "",
                                "",
                                "Trying to close channel " + channel
                                        + ", but response is not received in " + timeout
                                        + "ms, and the request id is " + future.id);
                    } catch (Throwable ignore) {
                    }
                }
                if (!future.isDone()) {
                    respInactive(channel, future);
                }
            }
        }
    }
}
3.68
hadoop_AbfsClientThrottlingAnalyzer_timerOrchestrator
/** * Synchronized method to suspend or resume timer. * @param timerFunctionality resume or suspend. * @param timerTask The timertask object. * @return true or false. */ private synchronized boolean timerOrchestrator(TimerFunctionality timerFunctionality, TimerTask timerTask) { switch (timerFunctionality) { case RESUME: if (isOperationOnAccountIdle.get()) { resumeTimer(); } break; case SUSPEND: if (accountLevelThrottlingEnabled && (System.currentTimeMillis() - lastExecutionTime.get() >= getOperationIdleTimeout())) { isOperationOnAccountIdle.set(true); timerTask.cancel(); timer.purge(); return true; } break; default: break; } return false; }
3.68
hudi_QuickstartConfigurations_sql
/** * Creates the tool to build hoodie table DDL. */ public static Sql sql(String tableName) { return new Sql(tableName); }
3.68
hbase_KeyStoreFileType_getDefaultFileExtension
/** * The file extension that is associated with this file type. */ public String getDefaultFileExtension() { return defaultFileExtension; }
3.68
morf_AbstractSqlDialectTest_expectedSqlForMathOperationsForExistingDataFix3
/** * @return the expected SQL for math operation for existing data fix 3 */ protected String expectedSqlForMathOperationsForExistingDataFix3() { return "MAX(assetLocationDate * 100000 + assetLocationTime)"; }
3.68
hbase_ScannerModel_getEndRow
/** Returns end row */ @XmlAttribute public byte[] getEndRow() { return endRow; }
3.68
hadoop_TFile_checkTFileDataIndex
/** * Lazily loads the TFile index. * * @throws IOException */ synchronized void checkTFileDataIndex() throws IOException { if (tfileIndex == null) { BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME); try { tfileIndex = new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta.getComparator()); } finally { brIndex.close(); } } }
3.68
hbase_MetricsConnection_newCallStats
/** Produce an instance of {@link CallStats} for clients to attach to RPCs. */
public static CallStats newCallStats() {
    // TODO: instance pool to reduce GC?
    return new CallStats();
}
3.68
hbase_ServerManager_updateLastFlushedSequenceIds
/**
 * Updates last flushed sequence Ids for the regions on server sn
 */
private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) {
    for (Entry<byte[], RegionMetrics> entry : hsl.getRegionMetrics().entrySet()) {
        byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(entry.getKey()));
        Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
        long l = entry.getValue().getCompletedSequenceId();
        // Don't let smaller sequence ids override greater sequence ids.
        if (LOG.isTraceEnabled()) {
            LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue
                + ", completeSequenceId=" + l);
        }
        if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue)) {
            flushedSequenceIdByRegion.put(encodedRegionName, l);
        } else if (l != HConstants.NO_SEQNUM && l < existingValue) {
            LOG.warn("RegionServer " + sn + " indicates a last flushed sequence id (" + l
                + ") that is less than the previous last flushed sequence id (" + existingValue
                + ") for region " + Bytes.toString(entry.getKey()) + " Ignoring.");
        }
        ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
            computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName,
                () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
        for (Entry<byte[], Long> storeSeqId : entry.getValue().getStoreSequenceId().entrySet()) {
            byte[] family = storeSeqId.getKey();
            existingValue = storeFlushedSequenceId.get(family);
            l = storeSeqId.getValue();
            if (LOG.isTraceEnabled()) {
                LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family)
                    + ", existingValue=" + existingValue + ", completeSequenceId=" + l);
            }
            // Don't let smaller sequence ids override greater sequence ids.
            if (existingValue == null
                || (l != HConstants.NO_SEQNUM && l > existingValue.longValue())) {
                storeFlushedSequenceId.put(family, l);
            }
        }
    }
}
3.68
graphhopper_Unzipper_unzip
/**
 * @param progressListener updates not in percentage but the number of bytes already read.
 */
public void unzip(InputStream fromIs, File toFolder, LongConsumer progressListener) throws IOException {
    if (!toFolder.exists())
        toFolder.mkdirs();

    long sumBytes = 0;
    ZipInputStream zis = new ZipInputStream(fromIs);
    try {
        ZipEntry ze = zis.getNextEntry();
        byte[] buffer = new byte[8 * 1024];
        while (ze != null) {
            if (ze.isDirectory()) {
                getVerifiedFile(toFolder, ze).mkdir();
            } else {
                double factor = 1;
                if (ze.getCompressedSize() > 0 && ze.getSize() > 0)
                    factor = (double) ze.getCompressedSize() / ze.getSize();

                File newFile = getVerifiedFile(toFolder, ze);
                FileOutputStream fos = new FileOutputStream(newFile);
                try {
                    int len;
                    while ((len = zis.read(buffer)) > 0) {
                        fos.write(buffer, 0, len);
                        sumBytes += len * factor;
                        if (progressListener != null)
                            progressListener.accept(sumBytes);
                    }
                } finally {
                    fos.close();
                }
            }
            ze = zis.getNextEntry();
        }
        zis.closeEntry();
    } finally {
        zis.close();
    }
}
3.68
hadoop_ActiveAuditManagerS3A_beforeTransmission
/** * Forward to the inner span. * {@inheritDoc} */ @Override public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { span.beforeTransmission(context, executionAttributes); }
3.68
zxing_MinimalEncoder_encode
/** * Encodes input minimally and returns an array of the codewords * * @param input The string to encode * @param priorityCharset The preferred {@link Charset}. When the value of the argument is null, the algorithm * chooses charsets that leads to a minimal representation. Otherwise the algorithm will use the priority * charset to encode any character in the input that can be encoded by it if the charset is among the * supported charsets. * @param fnc1 denotes the character in the input that represents the FNC1 character or -1 if this is not a GS1 * bar code. If the value is not -1 then a FNC1 is also prepended. * @param shape requested shape. * @param macroId Prepends the specified macro function in case that a value of 5 or 6 is specified. * @return An array of bytes representing the codewords of a minimal encoding. */ static byte[] encode(String input, Charset priorityCharset, int fnc1, SymbolShapeHint shape, int macroId) { return encodeMinimally(new Input(input, priorityCharset, fnc1, shape, macroId)).getBytes(); }
3.68
framework_VGridLayout_getAvailableHeight
/** * @return the total height of the spanned rows */ private int getAvailableHeight() { int height = rowHeights[row]; for (int i = 1; i < rowspan; i++) { height += getVerticalSpacing() + rowHeights[row + i]; } return height; }
3.68
flink_SingleInputUdfOperator_returns
/** * Adds a type information hint about the return type of this operator. This method can be used * in cases where Flink cannot determine automatically what the produced type of a function is. * That can be the case if the function uses generic type variables in the return type that * cannot be inferred from the input type. * * <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are * preferable. * * @param typeInfo The type information for the returned data type. * @return This operator using the given type information for the return type. */ public O returns(TypeInformation<OUT> typeInfo) { requireNonNull(typeInfo, "TypeInformation must not be null"); fillInType(typeInfo); @SuppressWarnings("unchecked") O returnType = (O) this; return returnType; }
3.68
flink_CliFrontend_setJobManagerAddressInConfig
/** * Writes the given job manager address to the associated configuration object. * * @param address Address to write to the configuration * @param config The configuration to write to */ static void setJobManagerAddressInConfig(Configuration config, InetSocketAddress address) { config.setString(JobManagerOptions.ADDRESS, address.getHostString()); config.setInteger(JobManagerOptions.PORT, address.getPort()); config.setString(RestOptions.ADDRESS, address.getHostString()); config.setInteger(RestOptions.PORT, address.getPort()); }
3.68
dubbo_DataParseUtils_writeFormContent
/** * Writes the given form data to the output stream as form content. * * @param formData * @param outputStream * @throws Exception */ public static void writeFormContent(Map formData, OutputStream outputStream) throws Exception { outputStream.write(serializeForm(formData, Charset.defaultCharset()).getBytes()); }
3.68
MagicPlugin_PreLoadEvent_registerBlockBuildManager
/** * Register a BlockBuildManager, for controlling whether or not players can place blocks with magic. * * @param manager The manager to add. */ public void registerBlockBuildManager(BlockBuildManager manager) { blockBuildManager.add(manager); }
3.68