Dataset schema: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68). Each record below is listed in that order: name, code_snippet, score.
flink_FieldAccessor_getFieldType
/** * Gets the TypeInformation for the type of the field. Note: For an array of a primitive type, * it returns the corresponding basic type (Integer for int[]). */ @SuppressWarnings("unchecked") public TypeInformation<F> getFieldType() { return fieldType; }
3.68
framework_VTabsheetBase_setClient
/** * For internal use only. May be removed or replaced in the future. * * @param client * the current application connection instance */ public void setClient(ApplicationConnection client) { this.client = client; }
3.68
graphhopper_FootAccessParser_getAccess
/** * Some ways are okay but not separate for pedestrians. */ public WayAccess getAccess(ReaderWay way) { String highwayValue = way.getTag("highway"); if (highwayValue == null) { WayAccess acceptPotentially = WayAccess.CAN_SKIP; if (FerrySpeedCalculator.isFerry(way)) { String footTag = way.getTag("foot"); if (footTag == null || intendedValues.contains(footTag)) acceptPotentially = WayAccess.FERRY; } // special case not for all acceptedRailways, only platform if (way.hasTag("railway", "platform")) acceptPotentially = WayAccess.WAY; if (way.hasTag("man_made", "pier")) acceptPotentially = WayAccess.WAY; if (!acceptPotentially.canSkip()) { if (way.hasTag(restrictions, restrictedValues) && !getConditionalTagInspector().isRestrictedWayConditionallyPermitted(way)) return WayAccess.CAN_SKIP; return acceptPotentially; } return WayAccess.CAN_SKIP; } // other scales are too dangerous, see http://wiki.openstreetmap.org/wiki/Key:sac_scale if (way.getTag("sac_scale") != null && !way.hasTag("sac_scale", allowedSacScale)) return WayAccess.CAN_SKIP; boolean permittedWayConditionallyRestricted = getConditionalTagInspector().isPermittedWayConditionallyRestricted(way); boolean restrictedWayConditionallyPermitted = getConditionalTagInspector().isRestrictedWayConditionallyPermitted(way); String firstValue = way.getFirstPriorityTag(restrictions); if (!firstValue.isEmpty()) { String[] restrict = firstValue.split(";"); for (String value : restrict) { if (restrictedValues.contains(value) && !restrictedWayConditionallyPermitted) return WayAccess.CAN_SKIP; if (intendedValues.contains(value) && !permittedWayConditionallyRestricted) return WayAccess.WAY; } } if (way.hasTag("sidewalk", sidewalkValues)) return WayAccess.WAY; if (!allowedHighwayTags.contains(highwayValue)) return WayAccess.CAN_SKIP; if (way.hasTag("motorroad", "yes")) return WayAccess.CAN_SKIP; if (isBlockFords() && ("ford".equals(highwayValue) || way.hasTag("ford"))) return WayAccess.CAN_SKIP; if (permittedWayConditionallyRestricted) return WayAccess.CAN_SKIP; return WayAccess.WAY; }
3.68
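A minimal usage sketch for the access check above, assuming the standard GraphHopper ReaderWay API; the parser instance and way id here are illustrative:
ReaderWay way = new ReaderWay(1L);                   // arbitrary OSM way id
way.setTag("highway", "path");
way.setTag("sac_scale", "hiking");                   // an allowed scale; e.g. "alpine_hiking" would be skipped
WayAccess access = footAccessParser.getAccess(way);  // footAccessParser: an already-constructed parser
// expected: WayAccess.WAY for an ordinary path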
hbase_CoprocessorClassLoader_clearCache
// This method is used in unit tests. public static void clearCache() { classLoadersCache.clear(); }
3.68
pulsar_PublicSuffixMatcher_matches
/** * Tests whether the given domain matches any entry from the public suffix list. * * @param domain the domain to test. * @param expectedType expected domain type or {@code null} if any. * @return {@code true} if the given domain matches any of the public suffixes. * * @since 4.5 */ public boolean matches(final String domain, final DomainType expectedType) { if (domain == null) { return false; } final String domainRoot = getDomainRoot( domain.startsWith(".") ? domain.substring(1) : domain, expectedType); return domainRoot == null; }
3.68
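A hedged usage sketch (matcher construction from the public suffix list is elided): matches returns true exactly when getDomainRoot finds no registrable root, i.e. the input itself is a public suffix.
// matcher: a PublicSuffixMatcher built from the public suffix list
boolean a = matcher.matches("co.uk", DomainType.ICANN);         // true: "co.uk" is itself a public suffix
boolean b = matcher.matches("example.co.uk", DomainType.ICANN); // false: "example.co.uk" has a registrable root
boolean c = matcher.matches(".co.uk", null);                    // leading dot is stripped first; null accepts any type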
hbase_CellSetModel_addRow
/** * Add a row to this cell set * @param row the row */ public void addRow(RowModel row) { rows.add(row); }
3.68
framework_VDateField_setCurrentLocale
/** * Sets the locale String. * * @param currentLocale * the new locale String. */ public void setCurrentLocale(String currentLocale) { this.currentLocale = currentLocale; }
3.68
hadoop_AdminACLsManager_getOwner
/** * Returns the owner * * @return Current user at the time of object creation */ public UserGroupInformation getOwner() { return owner; }
3.68
hbase_Bytes_readByteArray
/** * Read byte-array written with a WritableUtils.vint prefix. * @param in Input to read from. * @return byte array read off <code>in</code> * @throws IOException if the input cannot be read */ public static byte[] readByteArray(final DataInput in) throws IOException { int len = WritableUtils.readVInt(in); if (len < 0) { throw new NegativeArraySizeException(Integer.toString(len)); } byte[] result = new byte[len]; in.readFully(result, 0, len); return result; }
3.68
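The symmetric writer is Bytes.writeByteArray, which emits the vint length prefix this method consumes; a small round-trip sketch:
ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(bos);
Bytes.writeByteArray(out, new byte[] { 1, 2, 3 });   // vint length, then the payload
DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
byte[] roundTripped = Bytes.readByteArray(in);       // { 1, 2, 3 }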
querydsl_MetaDataExporter_setExportForeignKeys
/** * Set whether foreign keys should be exported * * @param exportForeignKeys true if foreign keys should be exported */ public void setExportForeignKeys(boolean exportForeignKeys) { this.exportForeignKeys = exportForeignKeys; }
3.68
hbase_RegionHDFSBlockLocationFinder_getDescriptor
/** * Return the TableDescriptor for a given table name, or null if no provider is set. * @param tableName the table name */ private TableDescriptor getDescriptor(TableName tableName) throws IOException { ClusterInfoProvider service = this.provider; if (service == null) { return null; } return service.getTableDescriptor(tableName); }
3.68
framework_PropertysetItem_addListener
/** * @deprecated As of 7.0, replaced by * {@link #addPropertySetChangeListener(Item.PropertySetChangeListener)} */ @Override @Deprecated public void addListener(Item.PropertySetChangeListener listener) { addPropertySetChangeListener(listener); }
3.68
hadoop_ResourceUsageMatcher_getProgress
/** * Returns the average progress. */ @Override public float getProgress() { if (emulationPlugins.size() > 0) { // return the average progress float progress = 0f; for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) { // accumulate the progress of each emulator progress += emulator.getProgress(); } return progress / emulationPlugins.size(); } // if no emulators are configured then return 1 return 1f; }
3.68
framework_DragSourceExtension_setDataTransferData
/** * Sets data for this drag source element with the given type. The data is * set for the client side draggable element using {@code * DataTransfer.setData(type, data)} method. * <p> * Note that {@code "text"} is the only cross browser supported data type. * Use {@link #setDataTransferText(String)} method instead if your * application supports IE11. * * @param type * Type of the data to be set for the client side draggable * element, e.g. {@code text/plain}. Cannot be {@code null}. * @param data * Data to be set for the client side draggable element. Cannot * be {@code null}. */ public void setDataTransferData(String type, String data) { if (type == null) { throw new IllegalArgumentException("Data type cannot be null"); } if (data == null) { throw new IllegalArgumentException("Data cannot be null"); } if (!getState(false).types.contains(type)) { getState().types.add(type); } getState().data.put(type, data); }
3.68
hbase_SimpleRpcServerResponder_doRespond
// // Enqueue a response from the application. // void doRespond(SimpleServerRpcConnection conn, RpcResponse resp) throws IOException { boolean added = false; // If there is already a write in progress, we don't wait. This lets the handlers be freed // immediately for other tasks. if (conn.responseQueue.isEmpty() && conn.responseWriteLock.tryLock()) { try { if (conn.responseQueue.isEmpty()) { // If we're alone, we can try to do a direct call to the socket. It's // an optimization to save on context switches and data transfer between cores. if (processResponse(conn, resp)) { return; // we're done. } // Too big to fit in one write, put it at the head of the queue. conn.responseQueue.addFirst(resp); added = true; // We will register with the selector later, outside of the lock. } } finally { conn.responseWriteLock.unlock(); } } if (!added) { conn.responseQueue.addLast(resp); } registerForWrite(conn); }
3.68
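The method above is an instance of the 'write directly if idle, otherwise enqueue' pattern; a simplified, framework-free sketch of the same control flow (writeFully and wakeSelector are hypothetical helpers):
final ReentrantLock writeLock = new ReentrantLock();
final Deque<ByteBuffer> queue = new ConcurrentLinkedDeque<>();
void respond(ByteBuffer resp) throws IOException {
    boolean added = false;
    if (queue.isEmpty() && writeLock.tryLock()) {      // fast path only when nobody else is writing
        try {
            if (queue.isEmpty() && writeFully(resp)) {
                return;                                // fully written, no queueing needed
            }
            queue.addFirst(resp);                      // partial write: head of the queue preserves ordering
            added = true;
        } finally {
            writeLock.unlock();
        }
    }
    if (!added) {
        queue.addLast(resp);
    }
    wakeSelector();                                    // hypothetical: register OP_WRITE interest with the selector
}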
dubbo_LFUCache_proceedEviction
/** * Evicts the least frequently used elements according to the eviction factor * specified at instantiation. * * @return number of evicted elements */ private int proceedEviction() { int targetSize = capacity - evictionCount; int evictedElements = 0; FREQ_TABLE_ITER_LOOP: for (int i = 0; i <= capacity; i++) { CacheNode<K, V> node; while (!freqTable[i].isEmpty()) { node = freqTable[i].pollFirst(); remove(node.key); if (targetSize >= curSize) { break FREQ_TABLE_ITER_LOOP; } evictedElements++; } } return evictedElements; }
3.68
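For intuition, the eviction walks frequency buckets from least to most frequently used and stops once the cache size falls to the target; a self-contained miniature of that walk:
// freq.get(i) holds keys accessed i times; evict from the coldest bucket first.
List<Deque<String>> freq = List.of(
    new ArrayDeque<>(List.of("rarely1", "rarely2")),   // frequency 0
    new ArrayDeque<>(List.of("sometimes")),            // frequency 1
    new ArrayDeque<>(List.of("often")));               // frequency 2
int curSize = 4, targetSize = 2, evicted = 0;
outer:
for (Deque<String> bucket : freq) {
    while (!bucket.isEmpty()) {
        bucket.pollFirst();                            // oldest entry in the coldest bucket goes first
        curSize--;
        evicted++;
        if (curSize <= targetSize) break outer;
    }
}
// evicted == 2: "rarely1" and "rarely2" are gone, hotter entries survive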
hadoop_CredentialProviderListFactory_buildAWSProviderList
/** * Load list of AWS credential provider/credential provider factory classes; * support a forbidden list to prevent loops, mandate full secrets, etc. * @param binding Binding URI; may be null * @param conf configuration * @param key configuration key to use * @param defaultValues list of default providers. * @param forbidden a possibly empty set of forbidden classes. * @return the list of classes, possibly empty * @throws IOException on a failure to load the list. */ public static AWSCredentialProviderList buildAWSProviderList( @Nullable final URI binding, final Configuration conf, final String key, final List<Class<?>> defaultValues, final Set<Class<?>> forbidden) throws IOException { // build up the base provider Collection<String> awsClasses = loadAWSProviderClasses(conf, key, defaultValues.toArray(new Class[defaultValues.size()])); Map<String, String> v1v2CredentialProviderMap = V1_V2_CREDENTIAL_PROVIDER_MAP; final Set<String> forbiddenClassnames = forbidden.stream().map(c -> c.getName()).collect(Collectors.toSet()); // iterate through, checking for forbidden values and then instantiating // each provider AWSCredentialProviderList providers = new AWSCredentialProviderList(); for (String className : awsClasses) { if (v1v2CredentialProviderMap.containsKey(className)) { // mapping final String mapped = v1v2CredentialProviderMap.get(className); LOG_REMAPPED_ENTRY.warn("Credentials option {} contains AWS v1 SDK entry {}; mapping to {}", key, className, mapped); className = mapped; } // now scan the forbidden list. doing this after any mappings ensures the v1 names // are also blocked if (forbiddenClassnames.contains(className)) { throw new InstantiationIOException(InstantiationIOException.Kind.Forbidden, binding, className, key, E_FORBIDDEN_AWS_PROVIDER, null); } AwsCredentialsProvider provider; try { provider = createAWSV2CredentialProvider(conf, className, binding, key); } catch (InstantiationIOException e) { // failed to create a v2; try to see if it is a v1 if (e.getKind() == InstantiationIOException.Kind.IsNotImplementation) { if (isAwsV1SdkAvailable()) { // try to create v1 LOG.debug("Failed to create {} as v2 credentials, trying to instantiate as v1", className); try { provider = AwsV1BindingSupport.createAWSV1CredentialProvider(conf, className, binding, key); LOG_REMAPPED_ENTRY.warn("Credentials option {} contains AWS v1 SDK entry {}", key, className); } catch (InstantiationIOException ex) { // if it is something other than non-implementation, throw. // that way, non-impl messages are about v2 not v1 in the error if (ex.getKind() != InstantiationIOException.Kind.IsNotImplementation) { throw ex; } else { throw e; } } } else { LOG.warn("Failed to instantiate {} as AWS v2 SDK credential provider;" + " AWS V1 SDK is not on the classpath so unable to attempt to" + " instantiate as a v1 provider", className, e); throw e; } } else { // any other problem throw e; } LOG.debug("From provider class {} created Aws provider {}", className, provider); } providers.add(provider); } return providers; }
3.68
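For context, a hedged sketch of driving the factory from configuration, using the usual S3A provider key (the empty defaults/forbidden sets are illustrative):
Configuration conf = new Configuration();
conf.set("fs.s3a.aws.credentials.provider",   // the key normally passed in as `key`
    "software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider");
AWSCredentialProviderList providers = CredentialProviderListFactory.buildAWSProviderList(
    null, conf, "fs.s3a.aws.credentials.provider",
    Collections.emptyList(), Collections.emptySet());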
framework_AbstractLegacyComponent_isReadOnly
/** * Tests whether the component is in the read-only mode. The user can not * change the value of a read-only component. As only {@code AbstractField} * or {@code LegacyField} components normally have a value that can be input * or changed by the user, this is mostly relevant only to field components, * though not restricted to them. * * <p> * Notice that the read-only mode only affects whether the user can change * the <i>value</i> of the component; it is possible to, for example, scroll * a read-only table. * </p> * * <p> * The method will return {@code true} if the component or any of its * parents is in the read-only mode. * </p> * * @return <code>true</code> if the component or any of its parents is in * read-only mode, <code>false</code> if not. * @see #setReadOnly(boolean) */ @Override public boolean isReadOnly() { return getState(false).readOnly; }
3.68
hadoop_RateLimitingFactory_unlimitedRate
/** * Get the unlimited rate. * @return a rate limiter which always has capacity. */ public static RateLimiting unlimitedRate() { return UNLIMITED; }
3.68
querydsl_SQLExpressions_avg
/** * Start a window function expression * * @param expr expression * @return avg(expr) */ public static <T extends Number> WindowOver<T> avg(Expression<T> expr) { return new WindowOver<T>(expr.getType(), Ops.AggOps.AVG_AGG, expr); }
3.68
hudi_GenericRecordPartialPayloadGenerator_validate
// At least one entry should be null private boolean validate(Object object) { if (object == null) { return true; } else if (object instanceof GenericRecord) { for (Schema.Field field : ((GenericRecord) object).getSchema().getFields()) { boolean ret = validate(((GenericRecord) object).get(field.name())); if (ret) { return ret; } } } return false; }
3.68
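A small sketch of what the check accepts, using plain Avro APIs (the schema here is illustrative):
Schema schema = new Schema.Parser().parse(
    "{\"type\":\"record\",\"name\":\"r\",\"fields\":["
    + "{\"name\":\"a\",\"type\":[\"null\",\"string\"],\"default\":null},"
    + "{\"name\":\"b\",\"type\":[\"null\",\"int\"],\"default\":null}]}");
GenericRecord rec = new GenericData.Record(schema);
rec.put("a", null);   // at least one null field, so validate(rec) returns true
rec.put("b", 42);     // with both fields non-null it would return false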
framework_GridLayout_getColumn1
/** * Gets the column of the top-left corner cell. * * @return the column of the top-left corner cell. */ public int getColumn1() { return childData.column1; }
3.68
hadoop_NativeAzureFileSystemHelper_checkForAzureStorageException
/* * Helper method to walk the exception's cause chain and check whether the cause is * an Azure storage exception. */ public static Throwable checkForAzureStorageException(Exception e) { Throwable innerException = e.getCause(); while (innerException != null && !(innerException instanceof StorageException)) { innerException = innerException.getCause(); } return innerException; }
3.68
flink_Costs_getHeuristicNetworkCost
/** * Gets the heuristic network cost. * * @return The heuristic network cost, in bytes to be transferred. */ public double getHeuristicNetworkCost() { return this.heuristicNetworkCost; }
3.68
flink_JobGraphGenerator_preVisit
/** * This methods implements the pre-visiting during a depth-first traversal. It create the job * vertex and sets local strategy. * * @param node The node that is currently processed. * @return True, if the visitor should descend to the node's children, false if not. * @see org.apache.flink.util.Visitor#preVisit(org.apache.flink.util.Visitable) */ @Override public boolean preVisit(PlanNode node) { // check if we have visited this node before. in non-tree graphs, this happens if (this.vertices.containsKey(node) || this.chainedTasks.containsKey(node) || this.iterations.containsKey(node)) { // return false to prevent further descend return false; } // the vertex to be created for the current node final JobVertex vertex; try { if (node instanceof SinkPlanNode) { vertex = createDataSinkVertex((SinkPlanNode) node); } else if (node instanceof SourcePlanNode) { vertex = createDataSourceVertex((SourcePlanNode) node); } else if (node instanceof BulkIterationPlanNode) { BulkIterationPlanNode iterationNode = (BulkIterationPlanNode) node; // for the bulk iteration, we skip creating anything for now. we create the graph // for the step function in the post visit. // check that the root of the step function has the same parallelism as the // iteration. // because the tail must have the same parallelism as the head, we can only merge // the last // operator with the tail, if they have the same parallelism. not merging is // currently not // implemented PlanNode root = iterationNode.getRootOfStepFunction(); if (root.getParallelism() != node.getParallelism()) { throw new CompilerException( "Error: The final operator of the step " + "function has a different parallelism than the iteration operator itself."); } IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++); this.iterations.put(iterationNode, descr); vertex = null; } else if (node instanceof WorksetIterationPlanNode) { WorksetIterationPlanNode iterationNode = (WorksetIterationPlanNode) node; // we have the same constraints as for the bulk iteration PlanNode nextWorkSet = iterationNode.getNextWorkSetPlanNode(); PlanNode solutionSetDelta = iterationNode.getSolutionSetDeltaPlanNode(); if (nextWorkSet.getParallelism() != node.getParallelism()) { throw new CompilerException( "It is currently not supported that the final operator of the step " + "function has a different parallelism than the iteration operator itself."); } if (solutionSetDelta.getParallelism() != node.getParallelism()) { throw new CompilerException( "It is currently not supported that the final operator of the step " + "function has a different parallelism than the iteration operator itself."); } IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++); this.iterations.put(iterationNode, descr); vertex = null; } else if (node instanceof SingleInputPlanNode) { vertex = createSingleInputVertex((SingleInputPlanNode) node); } else if (node instanceof DualInputPlanNode) { vertex = createDualInputVertex((DualInputPlanNode) node); } else if (node instanceof NAryUnionPlanNode) { // skip the union for now vertex = null; } else if (node instanceof BulkPartialSolutionPlanNode) { // create a head node (or not, if it is merged into its successor) vertex = createBulkIterationHead((BulkPartialSolutionPlanNode) node); } else if (node instanceof SolutionSetPlanNode) { // this represents an access into the solution set index. 
// we do not create a vertex for the solution set here (we create the head at the // workset place holder) // we adjust the joins / cogroups that go into the solution set here for (Channel c : node.getOutgoingChannels()) { DualInputPlanNode target = (DualInputPlanNode) c.getTarget(); JobVertex accessingVertex = this.vertices.get(target); TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration()); int inputNum = c == target.getInput1() ? 0 : c == target.getInput2() ? 1 : -1; // sanity checks if (inputNum == -1) { throw new CompilerException(); } // adjust the driver if (conf.getDriver().equals(JoinDriver.class)) { conf.setDriver( inputNum == 0 ? JoinWithSolutionSetFirstDriver.class : JoinWithSolutionSetSecondDriver.class); } else if (conf.getDriver().equals(CoGroupDriver.class)) { conf.setDriver( inputNum == 0 ? CoGroupWithSolutionSetFirstDriver.class : CoGroupWithSolutionSetSecondDriver.class); } else { throw new CompilerException( "Found join with solution set using incompatible operator (only Join/CoGroup are valid)."); } } // make sure we do not visit this node again. for that, we add a 'already seen' // entry into one of the sets this.chainedTasks.put(node, ALREADY_VISITED_PLACEHOLDER); vertex = null; } else if (node instanceof WorksetPlanNode) { // create the iteration head here vertex = createWorksetIterationHead((WorksetPlanNode) node); } else { throw new CompilerException("Unrecognized node type: " + node.getClass().getName()); } } catch (Exception e) { throw new CompilerException( "Error translating node '" + node + "': " + e.getMessage(), e); } // check if a vertex was created, or if it was chained or skipped if (vertex != null) { // set parallelism int pd = node.getParallelism(); vertex.setParallelism(pd); vertex.setMaxParallelism(pd); vertex.setSlotSharingGroup(sharingGroup); // check whether this vertex is part of an iteration step function if (this.currentIteration != null) { // check that the task has the same parallelism as the iteration as such PlanNode iterationNode = (PlanNode) this.currentIteration; if (iterationNode.getParallelism() < pd) { throw new CompilerException( "Error: All functions that are part of an iteration must have the same, or a lower, parallelism than the iteration operator."); } // store the id of the iterations the step functions participate in IterationDescriptor descr = this.iterations.get(this.currentIteration); new TaskConfig(vertex.getConfiguration()).setIterationId(descr.getId()); } // store in the map this.vertices.put(node, vertex); } // returning true causes deeper descend return true; }
3.68
framework_WindowElement_getCaption
/** * @return the caption of the window */ @Override public String getCaption() { return findElement(By.className(HEADER_CLASS)).getText(); }
3.68
hbase_AsyncTableBuilder_setMaxRetries
/** * Set the max retry times for an operation. Usually it is the max attempt times minus 1. * <p> * Operation timeout and max attempt times(or max retry times) are both limitations for retrying, * we will stop retrying when we reach any of the limitations. * @see #setMaxAttempts(int) * @see #setOperationTimeout(long, TimeUnit) */ default AsyncTableBuilder<C> setMaxRetries(int maxRetries) { return setMaxAttempts(retries2Attempts(maxRetries)); }
3.68
morf_HumanReadableStatementProducer_removeColumn
/** @see org.alfasoftware.morf.upgrade.SchemaEditor#removeColumn(java.lang.String, org.alfasoftware.morf.metadata.Column) **/ @Override public void removeColumn(String tableName, Column definition) { consumer.schemaChange(HumanReadableStatementHelper.generateRemoveColumnString(tableName, definition)); }
3.68
hbase_RegionInfo_toByteArray
/** * Returns This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) */ static byte[] toByteArray(RegionInfo ri) { byte[] bytes = ProtobufUtil.toRegionInfo(ri).toByteArray(); return ProtobufUtil.prependPBMagic(bytes); }
3.68
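The counterpart is RegionInfo.parseFrom(byte[]), which strips the magic prefix again; a round-trip sketch (parseFrom throws DeserializationException):
RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("test_table")).build();
byte[] bytes = RegionInfo.toByteArray(ri);   // protobuf payload behind a magic prefix
RegionInfo parsed = RegionInfo.parseFrom(bytes);
assert parsed.getTable().equals(ri.getTable());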
flink_BulkPartialSolutionNode_getOperator
/** * Gets the operator (here the {@link PartialSolutionPlaceHolder}) that is represented by this * optimizer node. * * @return The operator represented by this optimizer node. */ @Override public PartialSolutionPlaceHolder<?> getOperator() { return (PartialSolutionPlaceHolder<?>) super.getOperator(); }
3.68
framework_ComputedStyle_getBoxSizing
/** * Returns the value of the boxSizing property. * * @return the value of the boxSizing property */ private String getBoxSizing() { return getProperty("boxSizing"); }
3.68
flink_GenericDataSinkBase_getUserCodeWrapper
/** * Gets the class describing the output format. * * <p>This method is basically identical to {@link #getFormatWrapper()}. * * @return The class describing the output format. * @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper() */ @Override public UserCodeWrapper<? extends OutputFormat<IN>> getUserCodeWrapper() { return this.formatWrapper; }
3.68
pulsar_BrokerInterceptors_load
/** * Load the broker event interceptors for the given interceptor list. * * @param conf the pulsar broker service configuration * @return the collection of broker event interceptors */ public static BrokerInterceptor load(ServiceConfiguration conf) throws IOException { BrokerInterceptorDefinitions definitions = BrokerInterceptorUtils.searchForInterceptors(conf.getBrokerInterceptorsDirectory(), conf.getNarExtractionDirectory()); ImmutableMap.Builder<String, BrokerInterceptorWithClassLoader> builder = ImmutableMap.builder(); conf.getBrokerInterceptors().forEach(interceptorName -> { BrokerInterceptorMetadata definition = definitions.interceptors().get(interceptorName); if (null == definition) { throw new RuntimeException("No broker interceptor is found for name `" + interceptorName + "`. Available broker interceptors are : " + definitions.interceptors()); } BrokerInterceptorWithClassLoader interceptor; try { interceptor = BrokerInterceptorUtils.load(definition, conf.getNarExtractionDirectory()); if (interceptor != null) { builder.put(interceptorName, interceptor); } log.info("Successfully loaded broker interceptor for name `{}`", interceptorName); } catch (IOException e) { log.error("Failed to load the broker interceptor for name `" + interceptorName + "`", e); throw new RuntimeException("Failed to load the broker interceptor for name `" + interceptorName + "`"); } }); Map<String, BrokerInterceptorWithClassLoader> interceptors = builder.build(); if (interceptors != null && !interceptors.isEmpty()) { return new BrokerInterceptors(interceptors); } else { return null; } }
3.68
framework_MultiSelect_deselectAll
/** * Deselects all currently selected items. */ public default void deselectAll() { getSelectedItems().forEach(this::deselect); }
3.68
framework_AbstractRemoteDataSource_dropFromCache
/** * Drop the given range of rows from this data source's cache. * * @param range * the range of rows to drop */ protected void dropFromCache(Range range) { for (int i = range.getStart(); i < range.getEnd(); i++) { // Called after dropping from cache. Dropped row is passed as a // parameter, but is no longer present in the DataSource T removed = indexToRowMap.remove(Integer.valueOf(i)); if (removed != null) { onDropFromCache(i, removed); keyToIndexMap.remove(getRowKey(removed)); } } }
3.68
hadoop_AbfsTokenRenewer_cancel
/** * Cancel the delegation token. * * @param token token to cancel. * @param conf configuration object. * @throws IOException thrown when trying get current user. * @throws InterruptedException thrown when thread is interrupted. */ @Override public void cancel(final Token<?> token, Configuration conf) throws IOException, InterruptedException { LOG.debug("Cancelling the delegation token"); getInstance(conf).cancelDelegationToken(token); }
3.68
hbase_ModeStrategyUtils_aggregateRecords
/** * Group by records on the basis of supplied groupBy field and Aggregate records using * {@link Record#combine(Record)} * @param records records needs to be processed * @param groupBy Field to be used for group by * @return aggregated records */ public static List<Record> aggregateRecords(List<Record> records, Field groupBy) { return records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).entrySet().stream() .flatMap( e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) .collect(Collectors.toList()); }
3.68
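The pipeline groups records by the field value and folds each group with Record::combine; the same shape on plain data (self-contained, not the HBase types):
List<Map.Entry<String, Integer>> records = List.of(
    Map.entry("a", 1), Map.entry("a", 2), Map.entry("b", 5));
List<Map.Entry<String, Integer>> aggregated = records.stream()
    .collect(Collectors.groupingBy(Map.Entry::getKey)).entrySet().stream()
    .flatMap(e -> e.getValue().stream()
        .reduce((l, r) -> Map.entry(l.getKey(), l.getValue() + r.getValue()))
        .map(Stream::of).orElse(Stream.empty()))
    .collect(Collectors.toList());
// aggregated holds a=3 and b=5 (group order is unspecified)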
flink_SqlResourceType_symbol
/** * Creates a parse-tree node representing an occurrence of this keyword at a particular position * in the parsed text. */ public SqlLiteral symbol(SqlParserPos pos) { return SqlLiteral.createSymbol(this, pos); }
3.68
flink_ExpressionConverter_extractValue
/** * Extracts a value from a literal. Including planner-specific instances such as {@link * DecimalData}. */ @SuppressWarnings("unchecked") public static <T> T extractValue(ValueLiteralExpression literal, Class<T> clazz) { final Optional<Object> possibleObject = literal.getValueAs(Object.class); if (!possibleObject.isPresent()) { throw new TableException("Invalid literal."); } final Object object = possibleObject.get(); if (clazz.equals(BigDecimal.class)) { final Optional<BigDecimal> possibleDecimal = literal.getValueAs(BigDecimal.class); if (possibleDecimal.isPresent()) { return (T) possibleDecimal.get(); } if (object instanceof DecimalData) { return (T) ((DecimalData) object).toBigDecimal(); } } return literal.getValueAs(clazz) .orElseThrow(() -> new TableException("Unsupported literal class: " + clazz)); }
3.68
flink_AdvancedFunctionsExample_executeLastDatedValueFunction
/** * Aggregates data by name and returns the latest non-null {@code item_count} value with its * corresponding {@code order_date}. */ private static void executeLastDatedValueFunction(TableEnvironment env) { // create a table with example data final Table customers = env.fromValues( DataTypes.of("ROW<name STRING, order_date DATE, item_count INT>"), Row.of("Guillermo Smith", LocalDate.parse("2020-12-01"), 3), Row.of("Guillermo Smith", LocalDate.parse("2020-12-05"), 5), Row.of("Valeria Mendoza", LocalDate.parse("2020-03-23"), 4), Row.of("Valeria Mendoza", LocalDate.parse("2020-06-02"), 10), Row.of("Leann Holloway", LocalDate.parse("2020-05-26"), 9), Row.of("Leann Holloway", LocalDate.parse("2020-05-27"), null), Row.of("Brandy Sanders", LocalDate.parse("2020-10-14"), 1), Row.of("John Turner", LocalDate.parse("2020-10-02"), 12), Row.of("Ellen Ortega", LocalDate.parse("2020-06-18"), 100)); env.createTemporaryView("customers", customers); // register and execute the function env.createTemporarySystemFunction("LastDatedValueFunction", LastDatedValueFunction.class); env.executeSql( "SELECT name, LastDatedValueFunction(item_count, order_date) " + "FROM customers GROUP BY name") .print(); // clean up env.dropTemporaryView("customers"); }
3.68
hibernate-validator_BeanMetaDataManagerImpl_getAnnotationProcessingOptionsFromNonDefaultProviders
/** * @return returns the annotation ignores from the non annotation based meta data providers */ private AnnotationProcessingOptions getAnnotationProcessingOptionsFromNonDefaultProviders(List<MetaDataProvider> optionalMetaDataProviders) { AnnotationProcessingOptions options = new AnnotationProcessingOptionsImpl(); for ( MetaDataProvider metaDataProvider : optionalMetaDataProviders ) { options.merge( metaDataProvider.getAnnotationProcessingOptions() ); } return options; }
3.68
framework_FilesystemContainer_getParent
/* * Gets the parent item of the specified Item. Don't add a JavaDoc comment * here, we use the default documentation from implemented interface. */ @Override public Object getParent(Object itemId) { if (!(itemId instanceof File)) { return null; } return ((File) itemId).getParentFile(); }
3.68
hadoop_GPGPoliciesBlock_policyWeight2String
/** * Convert the policy weights to string format. * * @param weights policy weights. * @return string format of the policy weights, for example: SC-1: 0.91, SC-2: 0.09 */ private String policyWeight2String(Map<SubClusterIdInfo, Float> weights) { StringBuilder sb = new StringBuilder(); for (Map.Entry<SubClusterIdInfo, Float> entry : weights.entrySet()) { sb.append(entry.getKey().toId()).append(": ").append(entry.getValue()).append(", "); } if (sb.length() > 2) { sb.setLength(sb.length() - 2); } return sb.toString(); }
3.68
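The trailing ", " trim via setLength can also be written with StringJoiner; an equivalent sketch:
StringJoiner joiner = new StringJoiner(", ");
for (Map.Entry<SubClusterIdInfo, Float> entry : weights.entrySet()) {
    joiner.add(entry.getKey().toId() + ": " + entry.getValue());
}
return joiner.toString();   // e.g. "SC-1: 0.91, SC-2: 0.09"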
pulsar_MessageUtils_messageConverter
/** * Message convert to FlatMessage. * * @param message * @return FlatMessage List */ public static List<FlatMessage> messageConverter(Message message) { try { if (message == null) { return null; } List<FlatMessage> flatMessages = new ArrayList<>(); List<CanalEntry.Entry> entrys = null; if (message.isRaw()) { List<ByteString> rawEntries = message.getRawEntries(); entrys = new ArrayList<CanalEntry.Entry>(rawEntries.size()); for (ByteString byteString : rawEntries) { CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(byteString); entrys.add(entry); } } else { entrys = message.getEntries(); } for (CanalEntry.Entry entry : entrys) { if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) { continue; } CanalEntry.RowChange rowChange; try { rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue()); } catch (Exception e) { throw new RuntimeException("ERROR ## parser of eromanga-event has an error, data:" + entry.toString(), e); } CanalEntry.EventType eventType = rowChange.getEventType(); FlatMessage flatMessage = new FlatMessage(message.getId()); flatMessages.add(flatMessage); flatMessage.setDatabase(entry.getHeader().getSchemaName()); flatMessage.setTable(entry.getHeader().getTableName()); flatMessage.setIsDdl(rowChange.getIsDdl()); flatMessage.setType(eventType.toString()); flatMessage.setEs(entry.getHeader().getExecuteTime()); flatMessage.setTs(System.currentTimeMillis()); flatMessage.setSql(rowChange.getSql()); if (!rowChange.getIsDdl()) { List<Map<String, String>> data = new ArrayList<>(); List<Map<String, String>> old = new ArrayList<>(); for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) { if (eventType != CanalEntry.EventType.INSERT && eventType != CanalEntry.EventType.UPDATE && eventType != CanalEntry.EventType.DELETE) { continue; } List<CanalEntry.Column> columns; if (eventType == CanalEntry.EventType.DELETE) { columns = rowData.getBeforeColumnsList(); } else { columns = rowData.getAfterColumnsList(); } columns.size(); for (CanalEntry.Column column : columns) { Map<String, String> row = genColumn(column); if (column.getUpdated()) { row.put("updated", "1"); } else { row.put("updated", "0"); } data.add(row); } if (eventType == CanalEntry.EventType.UPDATE) { for (CanalEntry.Column column : rowData.getBeforeColumnsList()) { Map<String, String> rowOld = genColumn(column); old.add(rowOld); } } } if (!data.isEmpty()) { flatMessage.setData(data); } if (!old.isEmpty()) { flatMessage.setOld(old); } } } return flatMessages; } catch (Exception e) { throw new RuntimeException(e); } }
3.68
querydsl_GenericExporter_setHandleFields
/** * Set whether fields are handled (default true) * * @param b true if fields should be handled * @deprecated Use {@link #setPropertyHandling(PropertyHandling)} instead */ @Deprecated public void setHandleFields(boolean b) { handleFields = b; setPropertyHandling(); }
3.68
pulsar_BrokerService_isAllowAutoSubscriptionCreation
/** * @deprecated Avoid using the deprecated method * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and its blocking * call; use #{@link BrokerService#isAllowAutoSubscriptionCreationAsync(TopicName)} instead. */ @Deprecated public boolean isAllowAutoSubscriptionCreation(final TopicName topicName) { AutoSubscriptionCreationOverride autoSubscriptionCreationOverride = getAutoSubscriptionCreationOverride(topicName); if (autoSubscriptionCreationOverride != null) { return autoSubscriptionCreationOverride.isAllowAutoSubscriptionCreation(); } else { return pulsar.getConfiguration().isAllowAutoSubscriptionCreation(); } }
3.68
hbase_RegionMover_writeFile
/** * Write the number of regions moved in the first line followed by regions moved in subsequent * lines */ private void writeFile(String filename, List<RegionInfo> movedRegions) throws IOException { try (DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)))) { dos.writeInt(movedRegions.size()); for (RegionInfo region : movedRegions) { Bytes.writeByteArray(dos, RegionInfo.toByteArray(region)); } } catch (IOException e) { LOG.error("ERROR: Was Not able to write regions moved to output file but moved " + movedRegions.size() + " regions", e); throw e; } }
3.68
hbase_CreateNamespaceProcedure_setNamespaceQuota
/** * Set quota for the namespace * @param env MasterProcedureEnv * @param nsDescriptor NamespaceDescriptor **/ private static void setNamespaceQuota(final MasterProcedureEnv env, final NamespaceDescriptor nsDescriptor) throws IOException { if (env.getMasterServices().isInitialized()) { env.getMasterServices().getMasterQuotaManager().setNamespaceQuota(nsDescriptor); } }
3.68
flink_ExecNodeContext_resetIdCounter
/** Reset the id counter to 0. */ @VisibleForTesting public static void resetIdCounter() { idCounter.set(0); }
3.68
flink_Table_limit
/** * Limits a (possibly sorted) result to the first n rows from an offset position. * * <p>This method is a synonym for {@link #offset(int)} followed by {@link #fetch(int)}. */ default Table limit(int offset, int fetch) { return offset(offset).fetch(fetch); }
3.68
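A short usage sketch against an ordered table (table and column names are illustrative):
// rows 11..15 of the ordered result: skip 10, then fetch 5
Table page = orders.orderBy($("ts").asc()).limit(10, 5);
// equivalent to orders.orderBy($("ts").asc()).offset(10).fetch(5)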
hadoop_AllocateResponse_allocatedContainers
/** * Set the <code>allocatedContainers</code> of the response. * @see AllocateResponse#setAllocatedContainers(List) * @param allocatedContainers * <code>allocatedContainers</code> of the response * @return {@link AllocateResponseBuilder} */ @Private @Unstable public AllocateResponseBuilder allocatedContainers( List<Container> allocatedContainers) { allocateResponse.setAllocatedContainers(allocatedContainers); return this; }
3.68
hadoop_MawoConfiguration_getWorkerWorkSpace
/** * Get worker work space. * @return value of worker.workspace */ public String getWorkerWorkSpace() { return configsMap.get(WORKER_WORK_SPACE); }
3.68
hbase_MemStoreLABImpl_close
/** * Close this instance since it won't be used any more, try to put the chunks back to pool */ @Override public void close() { if (!this.closed.compareAndSet(false, true)) { return; } // We could put back the chunks to pool for reusing only when there is no // opening scanner which will read their data this.refCnt.release(); }
3.68
hbase_CommonFSUtils_logFSTree
/** * Recursive helper to log the state of the FS * @see #logFileSystemState(FileSystem, Path, Logger) */ private static void logFSTree(Logger log, final FileSystem fs, final Path root, String prefix) throws IOException { FileStatus[] files = listStatus(fs, root, null); if (files == null) { return; } for (FileStatus file : files) { if (file.isDirectory()) { log.debug(prefix + file.getPath().getName() + "/"); logFSTree(log, fs, file.getPath(), prefix + "---"); } else { log.debug(prefix + file.getPath().getName()); } } }
3.68
hudi_OptionsResolver_isAppendMode
/** * Returns whether the insert operation runs in append mode (i.e. clustering is disabled) under the given configuration {@code conf}. */ public static boolean isAppendMode(Configuration conf) { // 1. inline clustering is supported for COW table; // 2. async clustering is supported for both COW and MOR table return isInsertOperation(conf) && ((isCowTable(conf) && !conf.getBoolean(FlinkOptions.INSERT_CLUSTER)) || isMorTable(conf)); }
3.68
hadoop_BlockBlobAppendStream_generateBlockId
/** * Helper method that generates the next block id for uploading a block to * azure storage. * @return String representing the block ID generated. * @throws IOException if the stream is in invalid state */ private String generateBlockId() throws IOException { if (nextBlockCount == UNSET_BLOCKS_COUNT || blockIdPrefix == null) { throw new AzureException( "Append Stream in invalid state. nextBlockCount not set correctly"); } return (!blockIdPrefix.isEmpty()) ? generateNewerVersionBlockId(blockIdPrefix, nextBlockCount++) : generateOlderVersionBlockId(nextBlockCount++); }
3.68
flink_StateUtil_bestEffortDiscardAllStateObjects
/** * Iterates through the passed state handles and calls discardState() on each handle that is not * null. All occurring exceptions are suppressed and collected until the iteration is over and * emitted as a single exception. * * @param handlesToDiscard State handles to discard. Passed iterable is allowed to deliver null * values. * @throws Exception exception that is a collection of all suppressed exceptions that were * caught during iteration */ public static void bestEffortDiscardAllStateObjects( Iterable<? extends StateObject> handlesToDiscard) throws Exception { LambdaUtil.applyToAllWhileSuppressingExceptions( handlesToDiscard, StateObject::discardState); }
3.68
shardingsphere-elasticjob_RDBTracingStorageConfiguration_createDataSource
/** * Create data source. * * @return data source */ @SuppressWarnings({"unchecked", "rawtypes"}) @SneakyThrows(ReflectiveOperationException.class) public DataSource createDataSource() { DataSource result = (DataSource) Class.forName(dataSourceClassName).getConstructor().newInstance(); Method[] methods = result.getClass().getMethods(); for (Entry<String, Object> entry : props.entrySet()) { if (SKIPPED_PROPERTY_NAMES.contains(entry.getKey())) { continue; } Optional<Method> setterMethod = findSetterMethod(methods, entry.getKey()); if (setterMethod.isPresent()) { setterMethod.get().invoke(result, entry.getValue()); } } Optional<JDBCParameterDecorator> decorator = TypedSPILoader.findService(JDBCParameterDecorator.class, result.getClass()); return decorator.isPresent() ? decorator.get().decorate(result) : result; }
3.68
framework_AbstractOrderedLayout_isSpacing
/* * (non-Javadoc) * * @see com.vaadin.ui.Layout.SpacingHandler#isSpacing() */ @Override public boolean isSpacing() { return getState(false).spacing; }
3.68
pulsar_FieldParser_integerToString
/** * Converts Integer to String. * * @param value * The Integer to be converted. * @return The converted String value. */ public static String integerToString(Integer value) { return value.toString(); }
3.68
hudi_AvroSchemaCompatibility_getMessage
/** * Returns a human-readable message with more details about what failed. Syntax * depends on the SchemaIncompatibilityType. * * @return a String with details about the incompatibility. * @see #getType() */ public String getMessage() { return mMessage; }
3.68
hbase_AbstractRpcClient_cancelConnections
/** * Interrupt the connections to the given ip:port server. This should be called if the server is * known to be actually dead. This will not prevent the current operations from being retried, and, depending * on their own behavior, they may retry on the same server. This can be a feature, for example at * startup. In any case, they're likely to get connection refused (if the process died) or no * route to host: i.e. their next retries should be faster and with a safe exception. */ @Override public void cancelConnections(ServerName sn) { synchronized (connections) { for (T connection : connections.values()) { ConnectionId remoteId = connection.remoteId(); if ( remoteId.getAddress().getPort() == sn.getPort() && remoteId.getAddress().getHostName().equals(sn.getHostname()) ) { LOG.info("The server on " + sn.toString() + " is dead - stopping the connection " + connection.remoteId); connections.remove(remoteId, connection); connection.shutdown(); connection.cleanupConnection(); } } } }
3.68
morf_AbstractSqlDialectTest_likeEscapeSuffix
/** * On some databases our string literals need suffixing with explicit escape * character key word. * * @return suffix to insert after quoted string literal. */ protected String likeEscapeSuffix() { return " ESCAPE '\\'"; }
3.68
hadoop_CommitUtilsWithMR_formatAppAttemptDir
/** * Build the name of the job attempt directory. * @param jobUUID unique Job ID. * @param appAttemptId the ID of the application attempt for this job. * @return the directory tree for the application attempt */ public static String formatAppAttemptDir( String jobUUID, int appAttemptId) { return formatJobDir(jobUUID) + String.format("/%02d", appAttemptId); }
3.68
hbase_HMaster_isActiveMaster
/** * Report whether this master is currently the active master or not. If not active master, we are * parked on ZK waiting to become active. This method is used for testing. * @return true if active master, false if not. */ @Override public boolean isActiveMaster() { return activeMaster; }
3.68
pulsar_KerberosName_getRealm
/** * Get the realm of the name. * @return the realm of the name, may be null */ public String getRealm() { return realm; }
3.68
hadoop_ECBlock_isErased
/** * * @return true if this block is erased (i.e. known to be lost and in need of reconstruction), otherwise false */ public boolean isErased() { return isErased; }
3.68
flink_SkipListUtils_putPrevIndexNode
/** * Puts previous key pointer on the given index level to key space. * * @param memorySegment memory segment for key space. * @param offset offset of key space in the memory segment. * @param totalLevel top level of the key. * @param level level of index. * @param prevKeyPointer previous key pointer on the given level. */ public static void putPrevIndexNode( MemorySegment memorySegment, int offset, int totalLevel, int level, long prevKeyPointer) { int of = getIndexOffset(offset, totalLevel, level); memorySegment.putLong(of, prevKeyPointer); }
3.68
morf_AbstractSqlDialectTest_testAddColumnNotNullable
/** * Test adding a non-nullable column. */ @Test public void testAddColumnNotNullable() { testAlterTableColumn(AlterationType.ADD, column("dateField_new", DataType.DATE).defaultValue("2010-01-01"), expectedAlterTableAddColumnNotNullableStatement()); }
3.68
framework_DataCommunicator_setMinPushSize
/** * Set the minimum size of data which will be sent to the client when a data * source is set. * <p> * The server doesn't send all data from the data source to the client. It sends * an initial chunk of data (whose size is the minimum of the * {@code size} parameter of this method and the data size). The client decides * whether it is able to show more data and requests the server to send more data * (the next chunk). * <p> * When the component is disabled the client cannot communicate with the server * side (by design, for security reasons). This means that the client will * get <b>only</b> the initial chunk of data whose size is set here. * * @param size * the size of initial data to send to the client */ public void setMinPushSize(int size) { if (size < 0) { throw new IllegalArgumentException("Value cannot be negative"); } minPushSize = size; }
3.68
hadoop_MRJobConfUtil_setTaskLogProgressDeltaThresholds
/** * Load the values defined in a configuration file, including the delta * progress and the maximum time between each log message. * @param conf the configuration to read the thresholds from. */ public static void setTaskLogProgressDeltaThresholds( final Configuration conf) { if (progressMinDeltaThreshold == null) { progressMinDeltaThreshold = Double.valueOf(PROGRESS_MIN_DELTA_FACTOR * conf.getDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD_DEFAULT)); } if (progressMaxWaitDeltaTimeThreshold == null) { progressMaxWaitDeltaTimeThreshold = TimeUnit.SECONDS.toMillis(conf .getLong( MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS, MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS_DEFAULT)); } }
3.68
hbase_WALPlayer_usage
/** * Print usage * @param errorMsg Error message. Can be null. */ private void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: " + NAME + " [options] <WAL inputdir> [<tables> <tableMappings>]"); System.err.println(" <WAL inputdir> directory of WALs to replay."); System.err.println(" <tables> comma separated list of tables. If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); System.err.println( " <tableMappings> WAL entries can be mapped to a new set of tables by " + "passing"); System.err .println(" <tableMappings>, a comma separated list of target " + "tables."); System.err .println(" If specified, each table in <tables> must have a " + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); System.err.println("To specify a time range, pass:"); System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); System.err .println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + "format."); System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); System.err.println(" Use the specified mapreduce job name for the wal player"); System.err.println(" -Dwal.input.separator=' '"); System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); }
3.68
hudi_StreamSync_getDeducedSchemaProvider
/** * Apply schema reconcile and schema evolution rules(schema on read) and generate new target schema provider. * * @param incomingSchema schema of the source data * @param sourceSchemaProvider Source schema provider. * @return the SchemaProvider that can be used as writer schema. */ private SchemaProvider getDeducedSchemaProvider(Schema incomingSchema, SchemaProvider sourceSchemaProvider, HoodieTableMetaClient metaClient) { Option<Schema> latestTableSchemaOpt = UtilHelpers.getLatestTableSchema(hoodieSparkContext.jsc(), fs, cfg.targetBasePath, metaClient); Option<InternalSchema> internalSchemaOpt = HoodieConversionUtils.toJavaOption( HoodieSchemaUtils.getLatestTableInternalSchema( new HoodieConfig(HoodieStreamer.Config.getProps(fs, cfg)), metaClient)); // Deduce proper target (writer's) schema for the input dataset, reconciling its // schema w/ the table's one Schema targetSchema = HoodieSparkSqlWriter.deduceWriterSchema( incomingSchema, HoodieConversionUtils.toScalaOption(latestTableSchemaOpt), HoodieConversionUtils.toScalaOption(internalSchemaOpt), props); // Override schema provider with the reconciled target schema return new DelegatingSchemaProvider(props, hoodieSparkContext.jsc(), sourceSchemaProvider, new SimpleSchemaProvider(hoodieSparkContext.jsc(), targetSchema, props)); }
3.68
flink_JoinedStreams_with
/** * Completes the join operation with the user function that is executed for each combination * of elements with the same key in a window. * * <p><b>Note:</b> This is a temporary workaround while the {@link #apply(FlatJoinFunction, * TypeInformation)} method has the wrong return type and hence does not allow one to set an * operator-specific parallelism * * @deprecated This method will be removed once the {@link #apply(JoinFunction, * TypeInformation)} method is fixed in the next major version of Flink (2.0). */ @PublicEvolving @Deprecated public <T> SingleOutputStreamOperator<T> with( JoinFunction<T1, T2, T> function, TypeInformation<T> resultType) { return (SingleOutputStreamOperator<T>) apply(function, resultType); }
3.68
hadoop_AzureBlobFileSystemStore_generateContinuationTokenForNonXns
// generate continuation token for non-xns account private String generateContinuationTokenForNonXns(String path, final String firstEntryName) { Preconditions.checkArgument(!Strings.isNullOrEmpty(firstEntryName) && !firstEntryName.startsWith(AbfsHttpConstants.ROOT_PATH), "startFrom must be a dir/file name and it can not be a full path"); // Notice: non-xns continuation token requires full path (first "/" is not included) for startFrom path = AbfsClient.getDirectoryQueryParameter(path); final String startFrom = (path.isEmpty() || path.equals(ROOT_PATH)) ? firstEntryName : path + ROOT_PATH + firstEntryName; SimpleDateFormat simpleDateFormat = new SimpleDateFormat(TOKEN_DATE_PATTERN, Locale.US); String date = simpleDateFormat.format(new Date()); String token = String.format("%06d!%s!%06d!%s!%06d!%s!", path.length(), path, startFrom.length(), startFrom, date.length(), date); String base64EncodedToken = Base64.encode(token.getBytes(StandardCharsets.UTF_8)); StringBuilder encodedTokenBuilder = new StringBuilder(base64EncodedToken.length() + 5); encodedTokenBuilder.append(String.format("%s!%d!", TOKEN_VERSION, base64EncodedToken.length())); for (int i = 0; i < base64EncodedToken.length(); i++) { char current = base64EncodedToken.charAt(i); if (CHAR_FORWARD_SLASH == current) { current = CHAR_UNDERSCORE; } else if (CHAR_PLUS == current) { current = CHAR_STAR; } else if (CHAR_EQUALS == current) { current = CHAR_HYPHEN; } encodedTokenBuilder.append(current); } return encodedTokenBuilder.toString(); }
3.68
framework_TestSizeableIncomponents_getComponent
/** * Instantiates and populates component with test data to be ready for * testing. * * @return * @throws InstantiationException * @throws IllegalAccessException */ public Component getComponent() throws InstantiationException, IllegalAccessException { Component c = (Component) classToTest.newInstance(); if (c instanceof Button) { ((AbstractComponent) c).setCaption("test"); } if (AbstractSelect.class.isAssignableFrom(c.getClass())) { if (c instanceof Table) { Table new_name = (Table) c; new_name.setContainerDataSource( TestForTablesInitialColumnWidthLogicRendering .getTestTable(5, 100) .getContainerDataSource()); } else { AbstractSelect new_name = (AbstractSelect) c; Container cont = TestForTablesInitialColumnWidthLogicRendering .getTestTable(2, 8).getContainerDataSource(); new_name.setContainerDataSource(cont); new_name.setItemCaptionPropertyId( cont.getContainerPropertyIds().iterator().next()); } } else if (c instanceof ComponentContainer) { ComponentContainer new_name = (ComponentContainer) c; new_name.addComponent( new Label("component 1 in test container")); new_name.addComponent(new Button("component 2")); } else if (c instanceof Embedded) { Embedded em = (Embedded) c; em.setSource(new ThemeResource("test.png")); } else if (c instanceof Label) { ((Label) c).setValue("Test label"); } for (Configuration conf : configurations) { conf.configure(c); } return c; }
3.68
pulsar_BrokerInterceptorUtils_getBrokerInterceptorDefinition
/** * Retrieve the broker interceptor definition from the provided handler nar package. * * @param narPath the path to the broker interceptor NAR package * @return the broker interceptor definition * @throws IOException when fail to load the broker interceptor or get the definition */ public BrokerInterceptorDefinition getBrokerInterceptorDefinition(String narPath, String narExtractionDirectory) throws IOException { try (NarClassLoader ncl = NarClassLoaderBuilder.builder() .narFile(new File(narPath)) .extractionDirectory(narExtractionDirectory) .build()) { return getBrokerInterceptorDefinition(ncl); } }
3.68
rocketmq-connect_RetryWithToleranceOperator_reporters
/** * Set the error reporters for this connector. * * @param reporters the error reporters (should not be null). */ public void reporters(List<ErrorReporter> reporters) { this.context.reporters(reporters); }
3.68
flink_FileSystem_getLocalFileSystem
/** * Returns a reference to the {@link FileSystem} instance for accessing the local file system. * * @return a reference to the {@link FileSystem} instance for accessing the local file system. */ public static FileSystem getLocalFileSystem() { return FileSystemSafetyNet.wrapWithSafetyNetWhenActivated( LocalFileSystem.getSharedInstance()); }
3.68
framework_ExpandingContainer_checkExpand
// Expand container if we scroll past 85% public int checkExpand(int index) { log("checkExpand(" + index + ")"); if (index >= currentSize * 0.85) { final int oldsize = currentSize; currentSize = (int) (oldsize * 1.3333); log("*** getSizeWithHint(" + index + "): went past 85% of size=" + oldsize + ", new size=" + currentSize); updateLabel(); } return currentSize; }
3.68
hbase_Abortable_abort
/** * It just calls the other abort method with a null Throwable parameter. * @param why Why we're aborting. * @see Abortable#abort(String, Throwable) */ default void abort(String why) { abort(why, null); }
3.68
hadoop_RegistryDNSServer_serviceInit
/** * Initializes the DNS server. * @param conf the hadoop configuration instance. * @throws Exception if service initialization fails. */ @Override protected void serviceInit(Configuration conf) throws Exception { pathToRecordMap = new ConcurrentHashMap<>(); registryOperations = new RegistryOperationsService("RegistryDNSOperations"); addService(registryOperations); if (registryDNS == null) { registryDNS = (RegistryDNS) DNSOperationsFactory.createInstance(conf); } addService(registryDNS); super.serviceInit(conf); }
3.68
hbase_StorageClusterStatusModel_setTotalStaticBloomSizeKB
/** * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, not just loaded * into the block cache, in KB. */ public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; }
3.68
flink_FieldParser_nextStringEndPos
/** * Returns the end position of a string. Sets the error state if the column is empty. * * @return the end position of the string or -1 if an error occurred */ protected final int nextStringEndPos(byte[] bytes, int startPos, int limit, byte[] delimiter) { int endPos = startPos; final int delimLimit = limit - delimiter.length + 1; while (endPos < limit) { if (endPos < delimLimit && delimiterNext(bytes, endPos, delimiter)) { break; } endPos++; } if (endPos == startPos) { setErrorState(ParseErrorState.EMPTY_COLUMN); return -1; } return endPos; }
3.68
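A self-contained trace of the scan with the delimiterNext check inlined (only the variable names come from the snippet):
byte[] bytes = "abc|def".getBytes(StandardCharsets.US_ASCII);
byte[] delimiter = { '|' };
int startPos = 0, limit = bytes.length, endPos = startPos;
final int delimLimit = limit - delimiter.length + 1;
while (endPos < limit) {
    boolean match = endPos < delimLimit;
    for (int i = 0; match && i < delimiter.length; i++) {   // inlined delimiterNext()
        match = bytes[endPos + i] == delimiter[i];
    }
    if (match) break;
    endPos++;
}
// endPos == 3: the field "abc" ends right before the delimiter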
pulsar_SslContextAutoRefreshBuilder_get
/** * It updates SSLContext at every configured refresh time and returns updated SSLContext. * * @return */ public T get() { T ctx = getSslContext(); if (ctx == null) { try { update(); lastRefreshTime = System.currentTimeMillis(); return getSslContext(); } catch (GeneralSecurityException | IOException e) { log.error("Exception while trying to refresh ssl Context {}", e.getMessage(), e); } } else { long now = System.currentTimeMillis(); if (refreshTime <= 0 || now > (lastRefreshTime + refreshTime)) { if (needUpdate()) { try { ctx = update(); lastRefreshTime = now; } catch (GeneralSecurityException | IOException e) { log.error("Exception while trying to refresh ssl Context {} ", e.getMessage(), e); } } } } return ctx; }
3.68
hbase_AccessController_grant
/** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use * {@link Admin#grant(UserPermission, boolean)} instead. * @see Admin#grant(UserPermission, boolean) * @see <a href="https://issues.apache.org/jira/browse/HBASE-21739">HBASE-21739</a> */ @Deprecated @Override public void grant(RpcController controller, AccessControlProtos.GrantRequest request, RpcCallback<AccessControlProtos.GrantResponse> done) { final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission()); AccessControlProtos.GrantResponse response = null; try { // verify it's only running at .acl. if (aclRegion) { if (!initialized) { throw new CoprocessorException("AccessController not yet initialized"); } User caller = RpcServer.getRequestUser().orElse(null); if (LOG.isDebugEnabled()) { LOG.debug("Received request from {} to grant access permission {}", caller.getName(), perm.toString()); } preGrantOrRevoke(caller, "grant", perm); // regionEnv is set at #start. Hopefully not null at this point. regionEnv.getConnection().getAdmin().grant( new UserPermission(perm.getUser(), perm.getPermission()), request.getMergeExistingPermissions()); if (AUDITLOG.isTraceEnabled()) { // audit log should store permission changes in addition to auth results AUDITLOG.trace("Granted permission " + perm.toString()); } } else { throw new CoprocessorException(AccessController.class, "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.GrantResponse.getDefaultInstance(); } catch (IOException ioe) { // pass exception back up CoprocessorRpcUtils.setControllerException(controller, ioe); } done.run(response); }
3.68
morf_InsertStatementBuilder_getHints
/** * @return all hints in the order they were declared. */ List<Hint> getHints() { return hints; }
3.68
hbase_DeadServer_putIfAbsent
/** * Adds the server to the dead server list if it's not there already. */ synchronized void putIfAbsent(ServerName sn) { this.deadServers.putIfAbsent(sn, EnvironmentEdgeManager.currentTime()); }
3.68
hbase_Size_getUnit
/** Returns size unit */ public Unit getUnit() { return unit; }
3.68
zxing_PDF417ResultMetadata_getFileSize
/** * filesize in bytes of the encoded file * * @return filesize in bytes, -1 if not set */ public long getFileSize() { return fileSize; }
3.68
hibernate-validator_XmlParserHelper_getSchemaVersion
/** * Retrieves the schema version applying for the given XML input stream as * represented by the "version" attribute of the root element of the stream. * <p> * The given reader will be advanced to the root element of the given XML * structure. It can be used for unmarshalling from there. * * @param resourceName The name of the represented XML resource. * @param xmlEventReader An STAX event reader * * @return The value of the "version" attribute. For compatibility with BV * 1.0, "1.0" will be returned if the given stream doesn't have a * "version" attribute. */ public String getSchemaVersion(String resourceName, XMLEventReader xmlEventReader) { Contracts.assertNotNull( xmlEventReader, MESSAGES.parameterMustNotBeNull( "xmlEventReader" ) ); try { StartElement rootElement = getRootElement( xmlEventReader ); return getVersionValue( rootElement ); } catch (XMLStreamException e) { throw LOG.getUnableToDetermineSchemaVersionException( resourceName, e ); } }
3.68
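Obtaining the XMLEventReader this method expects is plain StAX; a hedged sketch (the helper instance and resource path are illustrative, and the checked XMLStreamException/IOException handling is elided):
XMLInputFactory factory = XMLInputFactory.newInstance();
factory.setProperty(XMLInputFactory.SUPPORT_DTD, false);   // optional hardening
try (InputStream in = Files.newInputStream(Paths.get("META-INF/validation.xml"))) {
    XMLEventReader xmlEventReader = factory.createXMLEventReader(in);
    String version = xmlParserHelper.getSchemaVersion("validation.xml", xmlEventReader);
    // "1.0" is returned when the root element has no version attribute
}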
flink_BufferDecompressor_decompressToIntermediateBuffer
/** * Decompresses the given {@link Buffer} using {@link BlockDecompressor}. The decompressed data * will be stored in the intermediate buffer of this {@link BufferDecompressor} and returned to * the caller. The caller must guarantee that the returned {@link Buffer} has been freed when * calling the method next time. * * <p>Note that the decompression will always start from offset 0 to the size of the input * {@link Buffer}. */ public Buffer decompressToIntermediateBuffer(Buffer buffer) { int decompressedLen = decompress(buffer); internalBuffer.setSize(decompressedLen); return internalBuffer.retainBuffer(); }
3.68
flink_FlinkImageBuilder_setTempDirectory
/** * Sets temporary path for holding temp files when building the image. * * <p>Note that this parameter is required, because the builder doesn't have lifecycle * management, and it is the caller's responsibility to create and remove the temp directory. */ public FlinkImageBuilder setTempDirectory(Path tempDirectory) { this.tempDirectory = tempDirectory; return this; }
3.68
hudi_TableChanges_getFullColName2Id
// expose to test public Map<String, Integer> getFullColName2Id() { return fullColName2Id; }
3.68
pulsar_AuthenticationDataSource_hasDataFromTls
/** * Check if data from TLS are available. * * @return true if this authentication data contain data from TLS */ default boolean hasDataFromTls() { return false; }
3.68
morf_JdbcUrlElements_getInstanceName
/** * @return the instance name. The meaning of this varies between database types. */ public String getInstanceName() { return instanceName; }
3.68
hbase_Bytes_putLong
/** * Put a long value out to the specified byte array position. * @param bytes the byte array * @param offset position in the array * @param val long to write out * @return incremented offset * @throws IllegalArgumentException if the byte array given doesn't have enough room at the offset * specified. */ public static int putLong(byte[] bytes, int offset, long val) { if (bytes.length - offset < SIZEOF_LONG) { throw new IllegalArgumentException("Not enough room to put a long at" + " offset " + offset + " in a " + bytes.length + " byte array"); } return ConverterHolder.BEST_CONVERTER.putLong(bytes, offset, val); }
3.68
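Round-trip sketch with the matching reader:
byte[] buf = new byte[Bytes.SIZEOF_LONG];   // 8 bytes
int next = Bytes.putLong(buf, 0, 42L);      // next == 8, the incremented offset
long back = Bytes.toLong(buf, 0);           // 42L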
hadoop_RouterSafemodeService_enter
/** * Enter safe mode. */ private void enter() { LOG.info("Entering safe mode"); enterSafeModeTime = monotonicNow(); safeMode = true; router.updateRouterState(RouterServiceState.SAFEMODE); }
3.68
flink_CalciteParser_parseSqlList
/** * Parses a SQL string into a {@link SqlNodeList}. The {@link SqlNodeList} is not yet validated. * * @param sql a sql string to parse * @return a parsed sql node list * @throws SqlParserException if an exception is thrown when parsing the statement * @throws SqlParserEOFException if the statement is incomplete */ public SqlNodeList parseSqlList(String sql) { try { SqlParser parser = SqlParser.create(sql, config); return parser.parseStmtList(); } catch (SqlParseException e) { if (e.getMessage().contains("Encountered \"<EOF>\"")) { throw new SqlParserEOFException(e.getMessage(), e); } throw new SqlParserException("SQL parse failed. " + e.getMessage(), e); } }
3.68
hadoop_AMRMProxyApplicationContextImpl_setLocalAMRMToken
/** * Sets the application's AMRMToken. * * @param localToken amrmToken issued by AMRMProxy */ public synchronized void setLocalAMRMToken( Token<AMRMTokenIdentifier> localToken) { this.localToken = localToken; this.localTokenKeyId = null; }
3.68