Dataset columns: name — string, length 12 to 178; code_snippet — string, length 8 to 36.5k; score — float64, range 3.26 to 3.68.
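The rows that follow are (name, code_snippet, score) triples, one field per line. As a minimal sketch of how such a dump might be consumed — assuming a JSON-lines serialization with those three fields, which is an assumption and not specified here — the rows could be read with Jackson:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.nio.file.Files;
import java.nio.file.Paths;

public class ScoredSnippetReader {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // "snippets.jsonl" is a hypothetical file name; one JSON object per line.
        for (String line : Files.readAllLines(Paths.get("snippets.jsonl"))) {
            JsonNode row = mapper.readTree(line);
            String name = row.get("name").asText();
            double score = row.get("score").asDouble();
            System.out.printf("%s -> %.2f%n", name, score);
        }
    }
}
```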
hbase_Mutation_getFingerprint
/** * Compile the column family (i.e. schema) information into a Map. Useful for parsing and * aggregation by debugging, logging, and administration tools. */ @Override public Map<String, Object> getFingerprint() { Map<String, Object> map = new HashMap<>(); List<String> families = new ArrayList<>(getFamilyCellMap().entrySet().size()); // ideally, we would also include table information, but that information // is not stored in each Operation instance. map.put("families", families); for (Map.Entry<byte[], List<Cell>> entry : getFamilyCellMap().entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; }
3.68
pulsar_ManagedLedgerConfig_getMetadataEnsemblesize
/** * @return the metadataEnsemblesize */ public int getMetadataEnsemblesize() { return metadataEnsembleSize; }
3.68
hadoop_UnresolvedPathException_getResolvedPath
/** * Return a path with the link resolved with the target. */ public Path getResolvedPath() { // If the path is absolute we can throw out the preceding part and // just append the remainder to the target, otherwise append each // piece to resolve the link in path. boolean noRemainder = (remainder == null || "".equals(remainder)); Path target = new Path(linkTarget); if (target.isUriPathAbsolute()) { return noRemainder ? target : new Path(target, remainder); } else { return noRemainder ? new Path(preceding, target) : new Path(new Path(preceding, linkTarget), remainder); } }
3.68
hbase_MasterProcedureScheduler_waitRegions
/** * Suspend the procedure if the specified set of regions are already locked. * @param procedure the procedure trying to acquire the lock on the regions * @param table the table name of the regions we are trying to lock * @param regionInfos the list of regions we are trying to lock * @return true if the procedure has to wait for the regions to be available */ public boolean waitRegions(final Procedure<?> procedure, final TableName table, final RegionInfo... regionInfos) { Arrays.sort(regionInfos, RegionInfo.COMPARATOR); schedLock(); try { assert table != null; if (waitTableSharedLock(procedure, table)) { return true; } // acquire region xlocks or wait boolean hasLock = true; final LockAndQueue[] regionLocks = new LockAndQueue[regionInfos.length]; for (int i = 0; i < regionInfos.length; ++i) { assert regionInfos[i] != null; assert regionInfos[i].getTable() != null; assert regionInfos[i].getTable().equals(table) : regionInfos[i] + " " + procedure; assert i == 0 || regionInfos[i] != regionInfos[i - 1] : "duplicate region: " + regionInfos[i]; regionLocks[i] = locking.getRegionLock(regionInfos[i].getEncodedName()); if (!regionLocks[i].tryExclusiveLock(procedure)) { LOG.info("Waiting on xlock for {} held by pid={}", procedure, regionLocks[i].getExclusiveLockProcIdOwner()); waitProcedure(regionLocks[i], procedure); hasLock = false; while (i-- > 0) { regionLocks[i].releaseExclusiveLock(procedure); } break; } else { LOG.info("Took xlock for {}", procedure); } } if (!hasLock) { wakeTableSharedLock(procedure, table); } return !hasLock; } finally { schedUnlock(); } }
3.68
AreaShop_Utils_configToLocation
/** * Create a location from a map, reconstruction from the config values. * @param config The config section to reconstruct from * @return The location */ public static Location configToLocation(ConfigurationSection config) { if(config == null || !config.isString("world") || !config.isDouble("x") || !config.isDouble("y") || !config.isDouble("z") || Bukkit.getWorld(config.getString("world")) == null) { return null; } Location result = new Location( Bukkit.getWorld(config.getString("world")), config.getDouble("x"), config.getDouble("y"), config.getDouble("z")); if(config.isString("yaw") && config.isString("pitch")) { result.setPitch(Float.parseFloat(config.getString("pitch"))); result.setYaw(Float.parseFloat(config.getString("yaw"))); } return result; }
3.68
dubbo_FieldUtils_isEnumMemberField
/** * Determines whether the field is an Enum member (constant) field or not. * * @param field the {@link VariableElement} to check; enum constants are public static final fields * @return <code>true</code> if the field is an enum constant, otherwise <code>false</code> */ static boolean isEnumMemberField(VariableElement field) { if (field == null || !isEnumType(field.getEnclosingElement())) { return false; } return ENUM_CONSTANT.equals(field.getKind()); }
3.68
framework_LoadingIndicatorConfiguration_setThirdDelay
/* * (non-Javadoc) * * @see com.vaadin.ui.LoadingIndicator#setThirdDelay(int) */ @Override public void setThirdDelay(int thirdDelay) { getState().thirdDelay = thirdDelay; }
3.68
flink_HiveParserSemanticAnalyzer_gatherCTEReferences
// TODO: check view references, too private void gatherCTEReferences( HiveParserQB qb, HiveParserBaseSemanticAnalyzer.CTEClause current) throws HiveException { for (String alias : qb.getTabAliases()) { String originTabName = qb.getOriginTabNameForAlias(alias); String cteName = originTabName.toLowerCase(); HiveParserBaseSemanticAnalyzer.CTEClause cte = findCTEFromName(qb, cteName); if (cte != null) { if (ctesExpanded.contains(cteName)) { throw new SemanticException( "Recursive cte " + cteName + " detected (cycle: " + StringUtils.join(ctesExpanded, " -> ") + " -> " + cteName + ")."); } cte.reference++; current.parents.add(cte); if (cte.qbExpr != null) { continue; } cte.qbExpr = new HiveParserQBExpr(cteName); doPhase1QBExpr(cte.cteNode, cte.qbExpr, qb.getId(), cteName); ctesExpanded.add(cteName); gatherCTEReferences(cte.qbExpr, cte); ctesExpanded.remove(ctesExpanded.size() - 1); } } for (String alias : qb.getSubqAliases()) { gatherCTEReferences(qb.getSubqForAlias(alias), current); } }
3.68
querydsl_MetaDataExporter_setImports
/** * Set the java imports * * @param imports * java imports array */ public void setImports(String[] imports) { module.bind(CodegenModule.IMPORTS, new HashSet<String>(Arrays.asList(imports))); }
3.68
starts_Attribute_getCount
/** * Returns the length of the attribute list that begins with this attribute. * * @return the length of the attribute list that begins with this attribute. */ final int getCount() { int count = 0; Attribute attr = this; while (attr != null) { count += 1; attr = attr.next; } return count; }
3.68
framework_VAbsoluteLayout_updateStylenames
/** * Updates all style names contained in the layout. * * @param primaryStyleName * The style name to use as primary */ protected void updateStylenames(String primaryStyleName) { super.setStylePrimaryName(primaryStyleName); canvas.setClassName(getStylePrimaryName() + "-canvas"); canvas.addClassName(getStylePrimaryName() + "-margin"); /* append rather than overwrite the canvas class */ for (Widget w : getChildren()) { if (w instanceof AbsoluteWrapper) { AbsoluteWrapper wrapper = (AbsoluteWrapper) w; wrapper.updateStyleNames(); } } }
3.68
flink_CheckpointsCleaner_addSubsumedCheckpoint
/** * Add one subsumed checkpoint to CheckpointsCleaner; the subsumed checkpoint will be discarded * at {@link #cleanSubsumedCheckpoints(long, Set, Runnable, Executor)}. * * @param completedCheckpoint the checkpoint which is subsumed. */ public void addSubsumedCheckpoint(CompletedCheckpoint completedCheckpoint) { synchronized (lock) { subsumedCheckpoints.add(completedCheckpoint); } }
3.68
hudi_BufferedRandomAccessFile_endPosition
/** * @return endPosition of the buffer. For the last file block, this may not be a valid position. */ private long endPosition() { return this.startPosition + this.capacity; }
3.68
flink_SpillingThread_mergeChannels
/** * Merges the sorted runs described by the given Channel IDs into a single sorted run. The * merging process uses the given read and write buffers. * * @param channelIDs The IDs of the runs' channels. * @param readBuffers The buffers for the readers that read the sorted runs. * @param writeBuffers The buffers for the writer that writes the merged channel. * @return The ID and number of blocks of the channel that describes the merged run. */ private ChannelWithBlockCount mergeChannels( List<ChannelWithBlockCount> channelIDs, List<List<MemorySegment>> readBuffers, List<MemorySegment> writeBuffers) throws IOException { // the list with the readers, to be closed at shutdown final List<FileIOChannel> channelAccesses = new ArrayList<>(channelIDs.size()); // the list with the target iterators final MergeIterator<E> mergeIterator = getMergingIterator(channelIDs, readBuffers, channelAccesses, null); // create a new channel writer final FileIOChannel.ID mergedChannelID = this.ioManager.createChannel(); spillChannelManager.registerChannelToBeRemovedAtShutdown(mergedChannelID); final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(mergedChannelID); spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(writer); final ChannelWriterOutputView output = new ChannelWriterOutputView(writer, writeBuffers, this.memManager.getPageSize()); openSpillingBehaviour(); spillingBehaviour.mergeRecords(mergeIterator, output); output.close(); final int numBlocksWritten = output.getBlockCount(); // register merged result to be removed at shutdown spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(writer); // remove the merged channel readers from the clear-at-shutdown list for (FileIOChannel access : channelAccesses) { access.closeAndDelete(); spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(access); } return new ChannelWithBlockCount(mergedChannelID, numBlocksWritten); }
3.68
pulsar_ConsumerImpl_sendFlowPermitsToBroker
/** * send the flow command to have the broker start pushing messages. */ private void sendFlowPermitsToBroker(ClientCnx cnx, int numMessages) { if (cnx != null && numMessages > 0) { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Adding {} additional permits", topic, subscription, numMessages); } if (log.isDebugEnabled()) { cnx.ctx().writeAndFlush(Commands.newFlow(consumerId, numMessages)) .addListener(writeFuture -> { if (!writeFuture.isSuccess()) { log.debug("Consumer {} failed to send {} permits to broker: {}", consumerId, numMessages, writeFuture.cause().getMessage()); } else { log.debug("Consumer {} sent {} permits to broker", consumerId, numMessages); } }); } else { cnx.ctx().writeAndFlush(Commands.newFlow(consumerId, numMessages), cnx.ctx().voidPromise()); } } }
3.68
flink_SourceTestSuiteBase_testSourceMetrics
/** * Test connector source metrics. * * <p>This test will create 4 splits in the external system first, write test data to all splits * and consume back via a Flink job with parallelism 4. Then read and compare the metrics. * * <p>Now test: numRecordsIn */ @TestTemplate @DisplayName("Test source metrics") public void testSourceMetrics( TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception { TestingSourceSettings sourceSettings = TestingSourceSettings.builder() .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED) .setCheckpointingMode(semantic) .build(); TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder() .setConnectorJarPaths(externalContext.getConnectorJarPaths()) .build(); final int splitNumber = 4; final List<List<T>> testRecordCollections = new ArrayList<>(); for (int i = 0; i < splitNumber; i++) { testRecordCollections.add(generateAndWriteTestData(i, externalContext, sourceSettings)); } // make sure to use different names across multiple executions String sourceName = "metricTestSource" + testRecordCollections.hashCode(); final StreamExecutionEnvironment env = testEnv.createExecutionEnvironment(envOptions); final DataStreamSource<T> dataStreamSource = env.fromSource( tryCreateSource(externalContext, sourceSettings), WatermarkStrategy.noWatermarks(), sourceName) .setParallelism(splitNumber); dataStreamSource.sinkTo(new DiscardingSink<>()); final JobClient jobClient = env.executeAsync("Metrics Test"); final MetricQuerier queryRestClient = new MetricQuerier(new Configuration()); final ExecutorService executorService = Executors.newCachedThreadPool(); try { waitForAllTaskRunning( () -> getJobDetails( new RestClient(new Configuration(), executorService), testEnv.getRestEndpoint(), jobClient.getJobID())); waitUntilCondition( () -> { // test metrics try { return checkSourceMetrics( queryRestClient, testEnv, jobClient.getJobID(), sourceName, getTestDataSize(testRecordCollections)); } catch (Exception e) { // skip this attempt if the metrics check fails return false; } }); } finally { // Clean up executorService.shutdown(); killJob(jobClient); } }
3.68
flink_CastRuleProvider_generateCodeBlock
/** * Create a {@link CastCodeBlock} for the provided input type and target type. Returns {@code * null} if no rule can be resolved or the resolved rule is not instance of {@link * CodeGeneratorCastRule}. * * @see CodeGeneratorCastRule#generateCodeBlock(CodeGeneratorCastRule.Context, String, String, * LogicalType, LogicalType) */ @SuppressWarnings("rawtypes") public static @Nullable CastCodeBlock generateCodeBlock( CodeGeneratorCastRule.Context context, String inputTerm, String inputIsNullTerm, LogicalType inputLogicalType, LogicalType targetLogicalType) { CastRule<?, ?> rule = INSTANCE.internalResolve(inputLogicalType, targetLogicalType); if (!(rule instanceof CodeGeneratorCastRule)) { return null; } return ((CodeGeneratorCastRule) rule) .generateCodeBlock( context, inputTerm, inputIsNullTerm, inputLogicalType, targetLogicalType); }
3.68
querydsl_NumberExpression_like
/** * Create a {@code this like str} expression * * @param str * @return this like str */ public BooleanExpression like(Expression<String> str) { return Expressions.booleanOperation(Ops.LIKE, stringValue(), str); }
3.68
hadoop_MutableCSConfigurationProvider_getInitSchedulerConfig
// Unit test can overwrite this method protected Configuration getInitSchedulerConfig() { Configuration initialSchedConf = new Configuration(false); initialSchedConf. addResource(YarnConfiguration.CS_CONFIGURATION_FILE); return initialSchedConf; }
3.68
dubbo_MonitorFilter_collect
/** * The collector logic; it is handled by the default monitor. * * @param invoker * @param invocation * @param result the invocation result * @param remoteHost the remote host address * @param start the timestamp at which the invocation began * @param error if there is an error on the invocation */ private void collect( Invoker<?> invoker, Invocation invocation, Result result, String remoteHost, long start, boolean error) { try { Object monitorUrl; monitorUrl = invoker.getUrl().getAttribute(MONITOR_KEY); if (monitorUrl instanceof URL) { Monitor monitor = monitorFactory.getMonitor((URL) monitorUrl); if (monitor == null) { return; } URL statisticsUrl = createStatisticsUrl(invoker, invocation, result, remoteHost, start, error); monitor.collect(statisticsUrl.toSerializableURL()); } } catch (Throwable t) { logger.warn( COMMON_MONITOR_EXCEPTION, "", "", "Failed to monitor count service " + invoker.getUrl() + ", cause: " + t.getMessage(), t); } }
3.68
flink_OpenApiSpecGenerator_injectAsyncOperationResultSchema
/** * The {@link AsynchronousOperationResult} contains a generic 'operation' field that can't be * properly extracted from swagger. This method injects these manually. * * <p>Resulting spec diff: * * <pre> * AsynchronousOperationResult: * type: object * properties: * operation: * - type: object * + oneOf: * + - $ref: '#/components/schemas/AsynchronousOperationInfo' * + - $ref: '#/components/schemas/SavepointInfo' * </pre> */ private static void injectAsyncOperationResultSchema( final OpenAPI openApi, List<Schema> asyncOperationSchemas) { final Schema schema = openApi.getComponents() .getSchemas() .get(AsynchronousOperationResult.class.getSimpleName()); if (schema != null) { schema.getProperties() .put( AsynchronousOperationResult.FIELD_NAME_OPERATION, new ComposedSchema().oneOf(asyncOperationSchemas)); } }
3.68
framework_RichTextAreaElement_getValue
/** * Return value of the field element. * * @return value of the field element * @since 8.4 */ public String getValue() { JavascriptExecutor executor = (JavascriptExecutor) getDriver(); return executor.executeScript( "return arguments[0].contentDocument.body.innerHTML", getEditorIframe()).toString(); }
3.68
morf_CreateViewListener_registerView
/** * * @param view View being created. * @return List of sql statements. * @deprecated kept to ensure backwards compatibility. */ @Override @Deprecated public Iterable<String> registerView(View view) { return ImmutableList.of(); }
3.68
flink_ResourceCounter_getResourceCount
/** * Number of resources with the given {@link ResourceProfile}. * * @param resourceProfile resourceProfile for which to look up the count * @return number of resources with the given resourceProfile or {@code 0} if the resource * profile does not exist */ public int getResourceCount(ResourceProfile resourceProfile) { return resources.getOrDefault(resourceProfile, 0); }
3.68
framework_Design_designToComponentTree
/** * Constructs a component hierarchy from the design specified as an html * tree. * * <p> * If a component root is given, the component instances created during * reading the design are assigned to its member fields based on their id, * local id, and caption * <p> * If the root is a custom component or composite, its composition root will * be populated with the design contents. Note that even if the root * component is a custom component/composite, the root element of the design * should not be one, to avoid nesting a custom component in a custom component. * * @param doc * the html tree * @param componentRoot * optional component root instance. The type must match the type * of the root element in the design or be a * {@link CustomComponent} or {@link Composite}, in which case * the root component will be set as the composition root. * @param classWithFields * a class (componentRoot class or a super class) with some * member fields. The member fields whose type is assignable from * {@link Component} are bound to fields in the design based on * id/local id/caption */ private static DesignContext designToComponentTree(Document doc, Component componentRoot, Class<?> classWithFields) { DesignContext designContext = new DesignContext(doc); designContext.readPackageMappings(doc); // No special handling for a document without a body element - should be // taken care of by jsoup. Element root = doc.body(); Elements children = root.children(); if (children.size() > 1) { throw new DesignException( "The first level of a component hierarchy should contain at most one root component, but found " + children.size() + "."); } Element element = children.isEmpty() ? null : children.first(); if (componentRoot != null) { if (element == null) { throw new DesignException( "The root element cannot be null when the specified root Component is" + " not null."); } // user has specified root instance that may have member fields that // should be bound final FieldBinder binder; try { binder = new FieldBinder(componentRoot, classWithFields); } catch (IntrospectionException e) { throw new DesignException( "Could not bind fields of the root component", e); } // create listener for component creations that binds the created // components to the componentRoot instance fields ComponentCreationListener creationListener = ( ComponentCreatedEvent event) -> binder.bindField( event.getComponent(), event.getLocalId()); designContext.addComponentCreationListener(creationListener); // create subtree if (ComponentRootSetter.canSetRoot(componentRoot)) { Component rootComponent = designContext.readDesign(element); ComponentRootSetter.setRoot(componentRoot, rootComponent); } else { designContext.readDesign(element, componentRoot); } // make sure that all the member fields are bound Collection<String> unboundFields = binder.getUnboundFields(); if (!unboundFields.isEmpty()) { throw new DesignException( "Found unbound fields from component root " + unboundFields); } // no need to listen anymore designContext.removeComponentCreationListener(creationListener); } else { // createChild creates the entire component hierarchy componentRoot = element == null ? null : designContext.readDesign(element); } designContext.setRootComponent(componentRoot); return designContext; }
3.68
hadoop_FindOptions_setCommandFactory
/** * Set the command factory. * * @param factory {@link CommandFactory} */ public void setCommandFactory(CommandFactory factory) { this.commandFactory = factory; }
3.68
flink_ExecNodeContext_generateUid
/** Returns a new {@code uid} for transformations. */ public String generateUid(String transformationName, ExecNodeConfig config) { if (!transformationNamePattern.matcher(transformationName).matches()) { throw new TableException( "Invalid transformation name '" + transformationName + "'. " + "This is a bug, please file an issue."); } final String uidPattern = config.get(ExecutionConfigOptions.TABLE_EXEC_UID_FORMAT); // Note: name and version are not included in the UID by default as they would prevent // migration. // No version because: An operator can change its state layout and bump up the ExecNode // version, in this case the UID should still be able to map state even after plan // migration to the new version. // No name because: We might fuse operators in the future, and a new operator might // subscribe to multiple old UIDs. return StringUtils.replaceEach( uidPattern, new String[] {"<id>", "<type>", "<version>", "<transformation>"}, new String[] { String.valueOf(id), name, String.valueOf(version), transformationName }); }
3.68
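The UID generation above is plain placeholder substitution via Commons Lang's StringUtils.replaceEach. A standalone sketch — the pattern string and values below are illustrative, not Flink's actual defaults:

```java
import org.apache.commons.lang3.StringUtils;

public class UidPatternDemo {
    public static void main(String[] args) {
        // Hypothetical uid pattern; Flink reads the real one from
        // table.exec.uid.format at runtime.
        String uidPattern = "<id>_<type>_<transformation>";
        String uid = StringUtils.replaceEach(
                uidPattern,
                new String[] {"<id>", "<type>", "<version>", "<transformation>"},
                new String[] {"4", "stream-exec-sink", "1", "upsert-materialize"});
        System.out.println(uid); // 4_stream-exec-sink_upsert-materialize
    }
}
```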
hbase_RoundRobinTableInputFormat_roundRobin
/** * Spread the splits list so as to avoid clumping on RegionServers. Order splits so every server * gets one split before a server gets a second, and so on; i.e. round-robin the splits amongst * the servers in the cluster. */ List<InputSplit> roundRobin(List<InputSplit> inputs) throws IOException { if ((inputs == null) || inputs.isEmpty()) { return inputs; } List<InputSplit> result = new ArrayList<>(inputs.size()); // Prepare a hashmap with each region server as key and list of Input Splits as value Map<String, List<InputSplit>> regionServerSplits = new HashMap<>(); for (InputSplit is : inputs) { if (is instanceof TableSplit) { String regionServer = ((TableSplit) is).getRegionLocation(); if (regionServer != null && !StringUtils.isBlank(regionServer)) { regionServerSplits.computeIfAbsent(regionServer, k -> new ArrayList<>()).add(is); continue; } } // If TableSplit or region server not found, add it anyways. result.add(is); } // Write out splits in a manner that spreads splits for a RegionServer to avoid 'clumping'. while (!regionServerSplits.isEmpty()) { Iterator<List<InputSplit>> iter = regionServerSplits.values().iterator(); while (iter.hasNext()) { List<InputSplit> inputSplitListForRegion = iter.next(); if (!inputSplitListForRegion.isEmpty()) { result.add(inputSplitListForRegion.remove(0)); } if (inputSplitListForRegion.isEmpty()) { iter.remove(); } } } return result; }
3.68
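The interleaving loop in roundRobin generalizes to any keyed lists: take one element per key per pass and drop exhausted keys. A self-contained sketch of the same pattern:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinDemo {
    public static void main(String[] args) {
        Map<String, List<String>> byServer = new LinkedHashMap<>();
        byServer.put("rs1", new ArrayList<>(Arrays.asList("a1", "a2", "a3")));
        byServer.put("rs2", new ArrayList<>(Arrays.asList("b1", "b2")));
        List<String> result = new ArrayList<>();
        // One element per server per pass; remove a server once its list is empty.
        while (!byServer.isEmpty()) {
            Iterator<List<String>> it = byServer.values().iterator();
            while (it.hasNext()) {
                List<String> splits = it.next();
                if (!splits.isEmpty()) {
                    result.add(splits.remove(0));
                }
                if (splits.isEmpty()) {
                    it.remove();
                }
            }
        }
        System.out.println(result); // [a1, b1, a2, b2, a3]
    }
}
```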
hbase_CheckAndMutate_build
/** * Build the CheckAndMutate object with a RowMutations to commit if the check succeeds. * @param mutations mutations to perform if check succeeds * @return a CheckAndMutate object */ public CheckAndMutate build(RowMutations mutations) { preCheck(mutations); if (filter != null) { return new CheckAndMutate(row, filter, timeRange, mutations); } else { return new CheckAndMutate(row, family, qualifier, op, value, timeRange, mutations); } }
3.68
hadoop_WriteOperationHelper_activateAuditSpan
/** * Activate the audit span. * @return the span */ private AuditSpan activateAuditSpan() { return auditSpan.activate(); }
3.68
shardingsphere-elasticjob_JobNodeStorage_isJobNodeExisted
/** * Judge whether the job node exists or not. * * @param node node * @return whether the job node exists */ public boolean isJobNodeExisted(final String node) { return regCenter.isExisted(jobNodePath.getFullPath(node)); }
3.68
hbase_WALKeyImpl_getTableName
/** Returns table name */ @Override public TableName getTableName() { return tablename; }
3.68
flink_S3TestCredentials_isNotEmpty
/** Checks if a String is not null and not empty. */ private static boolean isNotEmpty(@Nullable String str) { return str != null && !str.isEmpty(); }
3.68
morf_AbstractSelectStatementBuilder_crossJoin
/** * Specifies a cross join (creating a cartesian product) to a subselect: * * <blockquote><pre> * // Each sale as a percentage of all sales * TableReference sale = tableRef("Sale"); * SelectStatement outer = select( * sale.field("id"), * sale.field("amount") * .divideBy(totalSales.asTable().field("amount")) * .multiplyBy(literal(100)) * ) * .from(sale) * .crossJoin( * select(sum(field("amount"))) * .from(sale) * .alias("totalSales") * ); * </pre></blockquote> * * @param subSelect the sub select statement to join on to * @return this, for method chaining. */ public T crossJoin(SelectStatement subSelect) { joins.add(new Join(JoinType.INNER_JOIN, subSelect, null)); return castToChild(this); }
3.68
framework_ColorChangeEvent_getColor
/** * Returns the new color. */ public Color getColor() { return color; }
3.68
hbase_MasterAddressTracker_setMasterAddress
/** * Set master address into the <code>master</code> znode or into the backup subdirectory of backup * masters; switch off the passed in <code>znode</code> path. * @param zkw The ZKWatcher to use. * @param znode Where to create the znode; could be at the top level or it could be under backup * masters * @param master ServerName of the current master must not be null. * @return true if node created, false if not; a watch is set in both cases * @throws KeeperException if a ZooKeeper operation fails */ public static boolean setMasterAddress(final ZKWatcher zkw, final String znode, final ServerName master, int infoPort) throws KeeperException { return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master, infoPort)); }
3.68
morf_AbstractSqlDialectTest_testSelectWithLikeClause
/** * Tests a select with a where like clause. */ @Test public void testSelectWithLikeClause() { SelectStatement stmt = new SelectStatement() .from(new TableReference(TEST_TABLE)) .where(like(new FieldReference(STRING_FIELD), "A%")); String value = varCharCast("'A%'"); String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (stringField LIKE " + stringLiteralPrefix() + value + likeEscapeSuffix() +")"; assertEquals("Select with a like clause", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
hbase_VisibilityUtils_readLabelsFromZKData
/** * Reads back from the zookeeper. The data read here is of the form written by * writeToZooKeeper(Map&lt;byte[], Integer&gt; entries). * @return Labels and their ordinal details */ public static List<VisibilityLabel> readLabelsFromZKData(byte[] data) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); try { VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen); return builder.getVisLabelList(); } catch (IOException e) { throw new DeserializationException(e); } } return null; }
3.68
hudi_HiveIncrPullSource_findCommitToPull
/** * Finds the first commit from source, greater than the target's last commit, and reads it out. */ private Option<String> findCommitToPull(Option<String> latestTargetCommit) throws IOException { LOG.info("Looking for commits "); FileStatus[] commitTimePaths = fs.listStatus(new Path(incrPullRootPath)); List<String> commitTimes = new ArrayList<>(commitTimePaths.length); for (FileStatus commitTimePath : commitTimePaths) { String[] splits = commitTimePath.getPath().toString().split("/"); commitTimes.add(splits[splits.length - 1]); } Collections.sort(commitTimes); LOG.info("Retrieved commit times " + commitTimes); if (!latestTargetCommit.isPresent()) { // start from the beginning return Option.of(commitTimes.get(0)); } for (String instantTime : commitTimes) { // TODO(vc): Add an option to delete consumed commits if (instantTime.compareTo(latestTargetCommit.get()) > 0) { return Option.of(instantTime); } } return Option.empty(); }
3.68
streampipes_OutputStrategies_keep
/** * Creates a {@link org.apache.streampipes.model.output.KeepOutputStrategy}. Keep output strategies do not change the * schema of an input event, i.e., the output schema matches the input schema. * * @return KeepOutputStrategy */ public static KeepOutputStrategy keep() { return new KeepOutputStrategy(); }
3.68
hadoop_CrcComposer_newStripedCrcComposer
/** * Returns a CrcComposer which will collapse CRCs for every combined * underlying data size which aligns with the specified stripe boundary. For * example, if "update" is called with 20 CRCs and bytesPerCrc == 5, and * stripeLength == 10, then every two (10 / 5) consecutive CRCs will be * combined with each other, yielding a list of 10 CRC "stripes" in the * final digest, each corresponding to 10 underlying data bytes. Using * a stripeLength greater than the total underlying data size is equivalent * to using a non-striped CrcComposer. * * @param type type. * @param bytesPerCrcHint bytesPerCrcHint. * @param stripeLength stripeLength. * @return a CrcComposer which will collapse CRCs for every combined. * underlying data size which aligns with the specified stripe boundary. * @throws IOException raised on errors performing I/O. */ public static CrcComposer newStripedCrcComposer( DataChecksum.Type type, long bytesPerCrcHint, long stripeLength) throws IOException { int polynomial = DataChecksum.getCrcPolynomialForType(type); return new CrcComposer( polynomial, CrcUtil.getMonomial(bytesPerCrcHint, polynomial), bytesPerCrcHint, stripeLength); }
3.68
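The stripe arithmetic from the javadoc example (20 CRCs, bytesPerCrc == 5, stripeLength == 10) can be checked directly; a tiny counting sketch, independent of the Hadoop class:

```java
public class CrcStripeMath {
    public static void main(String[] args) {
        long bytesPerCrc = 5, stripeLength = 10, numCrcs = 20;
        long crcsPerStripe = stripeLength / bytesPerCrc; // 2 CRCs combined per stripe
        long stripes = numCrcs / crcsPerStripe;          // 10 stripes in the final digest
        System.out.println(crcsPerStripe + " CRCs/stripe -> " + stripes + " stripes");
    }
}
```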
zxing_IntentResult_getErrorCorrectionLevel
/** * @return name of the error correction level used in the barcode, if applicable */ public String getErrorCorrectionLevel() { return errorCorrectionLevel; }
3.68
framework_VAbstractCalendarPanel_setSubmitListener
/** * The submit listener is called when the user selects a value from the * calendar either by clicking the day or selects it by keyboard. * * @param submitListener * The listener to trigger */ public void setSubmitListener(SubmitListener submitListener) { this.submitListener = submitListener; }
3.68
hbase_HRegion_replayRecoveredEdits
/** * @param edits File of recovered edits. * @param maxSeqIdInStores Maximum sequenceid found in each store. Edits in wal must be larger * than this to be replayed for each store. * @return the sequence id of the last edit added to this region out of the recovered edits log or * <code>minSeqId</code> if nothing added from editlogs. */ private long replayRecoveredEdits(final Path edits, Map<byte[], Long> maxSeqIdInStores, final CancelableProgressable reporter, FileSystem fs) throws IOException { String msg = "Replaying edits from " + edits; LOG.info(msg); MonitoredTask status = TaskMonitor.get().createStatus(msg); status.setStatus("Opening recovered edits"); try (WALStreamReader reader = WALFactory.createStreamReader(fs, edits, conf)) { long currentEditSeqId = -1; long currentReplaySeqId = -1; long firstSeqIdInLog = -1; long skippedEdits = 0; long editsCount = 0; long intervalEdits = 0; WAL.Entry entry; HStore store = null; boolean reported_once = false; ServerNonceManager ng = this.rsServices == null ? null : this.rsServices.getNonceManager(); try { // How many edits seen before we check elapsed time int interval = this.conf.getInt("hbase.hstore.report.interval.edits", 2000); // How often to send a progress report (default 1/2 master timeout) int period = this.conf.getInt("hbase.hstore.report.period", 300000); long lastReport = EnvironmentEdgeManager.currentTime(); if (coprocessorHost != null) { coprocessorHost.preReplayWALs(this.getRegionInfo(), edits); } while ((entry = reader.next()) != null) { WALKey key = entry.getKey(); WALEdit val = entry.getEdit(); if (ng != null) { // some test, or nonces disabled ng.reportOperationFromWal(key.getNonceGroup(), key.getNonce(), key.getWriteTime()); } if (reporter != null) { intervalEdits += val.size(); if (intervalEdits >= interval) { // Number of edits interval reached intervalEdits = 0; long cur = EnvironmentEdgeManager.currentTime(); if (lastReport + period <= cur) { status.setStatus( "Replaying edits..." + " skipped=" + skippedEdits + " edits=" + editsCount); // Timeout reached if (!reporter.progress()) { msg = "Progressable reporter failed, stopping replay for region " + this; LOG.warn(msg); status.abort(msg); throw new IOException(msg); } reported_once = true; lastReport = cur; } } } if (firstSeqIdInLog == -1) { firstSeqIdInLog = key.getSequenceId(); } if (currentEditSeqId > key.getSequenceId()) { // when this condition is true, it means we have a serious defect because we need to // maintain increasing SeqId for WAL edits per region LOG.error(getRegionInfo().getEncodedName() + " : " + "Found decreasing SeqId. PreId=" + currentEditSeqId + " key=" + key + "; edit=" + val); } else { currentEditSeqId = key.getSequenceId(); } currentReplaySeqId = (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : currentEditSeqId; // Start coprocessor replay here. The coprocessor is for each WALEdit // instead of a KeyValue. if (coprocessorHost != null) { status.setStatus("Running pre-WAL-restore hook in coprocessors"); if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) { // if bypass this wal entry, ignore it ... continue; } } boolean checkRowWithinBoundary = false; // Check this edit is for this region. if ( !Bytes.equals(key.getEncodedRegionName(), this.getRegionInfo().getEncodedNameAsBytes()) ) { checkRowWithinBoundary = true; } boolean flush = false; MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); for (Cell cell : val.getCells()) { // Check this edit is for me. Also, guard against writing the special // METACOLUMN info such as HBASE::CACHEFLUSH entries if (WALEdit.isMetaEditFamily(cell)) { // if region names don't match, skip replaying compaction marker if (!checkRowWithinBoundary) { // this is a special edit, we should handle it CompactionDescriptor compaction = WALEdit.getCompaction(cell); if (compaction != null) { // replay the compaction replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE); } } skippedEdits++; continue; } // Figure which store the edit is meant for. if ( store == null || !CellUtil.matchingFamily(cell, store.getColumnFamilyDescriptor().getName()) ) { store = getStore(cell); } if (store == null) { // This should never happen. Perhaps schema was changed between // crash and redeploy? LOG.warn("No family for cell {} in region {}", cell, this); skippedEdits++; continue; } if ( checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(), cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) ) { LOG.warn("Row of {} is not within region boundary for region {}", cell, this); skippedEdits++; continue; } // Now, figure if we should skip this edit. if ( key.getSequenceId() <= maxSeqIdInStores.get(store.getColumnFamilyDescriptor().getName()) ) { skippedEdits++; continue; } PrivateCellUtil.setSequenceId(cell, currentReplaySeqId); restoreEdit(store, cell, memStoreSizing); editsCount++; } MemStoreSize mss = memStoreSizing.getMemStoreSize(); incMemStoreSize(mss); flush = isFlushSize(this.memStoreSizing.getMemStoreSize()); if (flush) { internalFlushcache(null, currentEditSeqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY); } if (coprocessorHost != null) { coprocessorHost.postWALRestore(this.getRegionInfo(), key, val); } } if (coprocessorHost != null) { coprocessorHost.postReplayWALs(this.getRegionInfo(), edits); } } catch (EOFException eof) { if (!conf.getBoolean(RECOVERED_EDITS_IGNORE_EOF, false)) { Path p = WALSplitUtil.moveAsideBadEditsFile(walFS, edits); msg = "Encountered EOF. Most likely due to Master failure during " + "wal splitting, so we have this data in another edit. Continuing, but renaming " + edits + " as " + p + " for region " + this; LOG.warn(msg, eof); status.abort(msg); } else { LOG.warn("EOF while replaying recovered edits and config '{}' is true so " + "we will ignore it and continue", RECOVERED_EDITS_IGNORE_EOF, eof); } } catch (IOException ioe) { // If the IOE resulted from bad file format, // then this problem is idempotent and retrying won't help if (ioe.getCause() instanceof ParseException) { Path p = WALSplitUtil.moveAsideBadEditsFile(walFS, edits); msg = "File corruption encountered! " + "Continuing, but renaming " + edits + " as " + p; LOG.warn(msg, ioe); status.setStatus(msg); } else { status.abort(StringUtils.stringifyException(ioe)); // other IO errors may be transient (bad network connection, // checksum exception on one datanode, etc). throw & retry throw ioe; } } if (reporter != null && !reported_once) { reporter.progress(); } msg = "Applied " + editsCount + ", skipped " + skippedEdits + ", firstSequenceIdInLog=" + firstSeqIdInLog + ", maxSequenceIdInLog=" + currentEditSeqId + ", path=" + edits; status.markComplete(msg); LOG.debug(msg); return currentEditSeqId; } finally { status.cleanup(); } }
3.68
hibernate-validator_AnnotationProxy_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
hadoop_GlobPattern_set
/** * Set and compile a glob pattern * @param glob the glob pattern string */ public void set(String glob) { StringBuilder regex = new StringBuilder(); int setOpen = 0; int curlyOpen = 0; int len = glob.length(); hasWildcard = false; for (int i = 0; i < len; i++) { char c = glob.charAt(i); switch (c) { case BACKSLASH: if (++i >= len) { error("Missing escaped character", glob, i); } regex.append(c).append(glob.charAt(i)); continue; case '.': case '$': case '(': case ')': case '|': case '+': // escape regex special chars that are not glob special chars regex.append(BACKSLASH); break; case '*': regex.append('.'); hasWildcard = true; break; case '?': regex.append('.'); hasWildcard = true; continue; case '{': // start of a group regex.append("(?:"); // non-capturing curlyOpen++; hasWildcard = true; continue; case ',': regex.append(curlyOpen > 0 ? '|' : c); continue; case '}': if (curlyOpen > 0) { // end of a group curlyOpen--; regex.append(")"); continue; } break; case '[': if (setOpen > 0) { error("Unclosed character class", glob, i); } setOpen++; hasWildcard = true; break; case '^': // ^ inside [...] can be unescaped if (setOpen == 0) { regex.append(BACKSLASH); } break; case '!': // [! needs to be translated to [^ regex.append(setOpen > 0 && '[' == glob.charAt(i - 1) ? '^' : '!'); continue; case ']': // Many set errors like [][] could not be easily detected here, // as []], []-] and [-] are all valid POSIX glob and java regex. // We'll just let the regex compiler do the real work. setOpen = 0; break; default: } regex.append(c); } if (setOpen > 0) { error("Unclosed character class", glob, len); } if (curlyOpen > 0) { error("Unclosed group", glob, len); } compiled = Pattern.compile(regex.toString(), Pattern.DOTALL); }
3.68
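The switch above encodes a glob-to-regex translation: `*` becomes `.*`, `?` becomes `.`, `{a,b}` becomes the non-capturing group `(?:a|b)`, and `[!x]` becomes `[^x]`. A hand-translated example, independent of the Hadoop class, showing the resulting regex in action:

```java
import java.util.regex.Pattern;

public class GlobTranslationDemo {
    public static void main(String[] args) {
        // Hand-translated from the glob "part-*.{csv,txt}" using the rules
        // in GlobPattern.set: * -> .*  ?  -> .  {a,b} -> (?:a|b)  [!x] -> [^x]
        Pattern p = Pattern.compile("part-.*\\.(?:csv|txt)", Pattern.DOTALL);
        System.out.println(p.matcher("part-0001.csv").matches());  // true
        System.out.println(p.matcher("part-0001.json").matches()); // false
    }
}
```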
flink_DecimalData_fromUnscaledBytes
/** * Creates an instance of {@link DecimalData} from an unscaled byte array value and the given * precision and scale. */ public static DecimalData fromUnscaledBytes(byte[] unscaledBytes, int precision, int scale) { BigDecimal bd = new BigDecimal(new BigInteger(unscaledBytes), scale); return fromBigDecimal(bd, precision, scale); }
3.68
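fromUnscaledBytes delegates to BigDecimal's unscaled-value constructor, where the byte array is a big-endian two's-complement integer and the scale shifts the decimal point. A small worked example of that conversion (the byte values are illustrative):

```java
import java.math.BigDecimal;
import java.math.BigInteger;

public class UnscaledBytesDemo {
    public static void main(String[] args) {
        byte[] unscaled = {0x04, (byte) 0xD2};  // big-endian two's complement of 1234
        BigDecimal bd = new BigDecimal(new BigInteger(unscaled), 2); // scale 2
        System.out.println(bd);                 // 12.34
    }
}
```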
framework_ContainerHelpers_getItemIdsUsingGetIdByIndex
/** * Get a range of item ids from the container using * {@link Indexed#getIdByIndex(int)}. This is just a helper method to aid * developers to quickly add the required functionality to a Container * during development. This should not be used in a "finished product" * unless fetching an id for an index is very inexpensive because a separate * request will be performed for each index in the range. * * @param startIndex * index of the first item id to get * @param numberOfIds * the number of consecutive items whose ids should be returned * @param container * the container from which the items should be fetched * @return A list of item ids in the range specified */ public static List<?> getItemIdsUsingGetIdByIndex(int startIndex, int numberOfIds, Container.Indexed container) { if (container == null) { throw new IllegalArgumentException( "The given container cannot be null!"); } if (startIndex < 0) { throw new IndexOutOfBoundsException( "Start index cannot be negative! startIndex=" + startIndex); } if (startIndex > container.size()) { throw new IndexOutOfBoundsException( "Start index exceeds container size! startIndex=" + startIndex + " containerLastItemIndex=" + (container.size() - 1)); } if (numberOfIds < 1) { if (numberOfIds == 0) { return Collections.emptyList(); } throw new IllegalArgumentException( "Cannot get negative amount of items! numberOfItems=" + numberOfIds); } // not included in the range int endIndex = startIndex + numberOfIds; if (endIndex > container.size()) { endIndex = container.size(); } List<Object> rangeOfIds = new ArrayList<Object>(); for (int i = startIndex; i < endIndex; i++) { Object idByIndex = container.getIdByIndex(i); if (idByIndex == null) { throw new RuntimeException("Unable to get item id for index: " + i + " from container using Container.Indexed#getIdByIndex() " + "even though container.size() > endIndex. " + "Returned item id was null. " + "Check your container implementation!"); } rangeOfIds.add(idByIndex); } return Collections.unmodifiableList(rangeOfIds); }
3.68
flink_BlobCacheService_setBlobServerAddress
/** * Sets the address of the {@link BlobServer}. * * @param blobServerAddress address of the {@link BlobServer}. */ public void setBlobServerAddress(InetSocketAddress blobServerAddress) { permanentBlobCache.setBlobServerAddress(blobServerAddress); transientBlobCache.setBlobServerAddress(blobServerAddress); }
3.68
hadoop_SlowPeerTracker_getJson
/** * Retrieve all valid reports as a JSON string. * @return serialized representation of valid reports. null if * serialization failed. */ public String getJson() { Collection<SlowPeerJsonReport> validReports = getJsonReports( maxNodesToReport); try { return WRITER.writeValueAsString(validReports); } catch (JsonProcessingException e) { // Failed to serialize. Don't log the exception call stack. LOG.debug("Failed to serialize statistics: " + e); return null; } }
3.68
hadoop_BlobOperationDescriptor_getContentLengthIfKnown
/** * Gets the content length for the Azure Storage operation, or returns zero if * unknown. * @param conn the connection object for the Azure Storage operation. * @param operationType the Azure Storage operation type. * @return the content length, or zero if unknown. */ static long getContentLengthIfKnown(HttpURLConnection conn, OperationType operationType) { long contentLength = 0; switch (operationType) { case AppendBlock: case PutBlock: String lengthString = conn.getRequestProperty( HeaderConstants.CONTENT_LENGTH); contentLength = (lengthString != null) ? Long.parseLong(lengthString) : 0; break; case PutPage: case GetBlob: contentLength = BlobOperationDescriptor.getContentLengthIfKnown( conn.getRequestProperty("x-ms-range")); break; default: break; } return contentLength; }
3.68
zxing_AlignmentPatternFinder_handlePossibleCenter
/** * <p>This is called when a horizontal scan finds a possible alignment pattern. It will * cross check with a vertical scan, and if successful, will see if this pattern had been * found on a previous horizontal scan. If so, we consider it confirmed and conclude we have * found the alignment pattern.</p> * * @param stateCount reading state module counts from horizontal scan * @param i row where alignment pattern may be found * @param j end of possible alignment pattern in row * @return {@link AlignmentPattern} if we have found the same pattern twice, or null if not */ private AlignmentPattern handlePossibleCenter(int[] stateCount, int i, int j) { int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2]; float centerJ = centerFromEnd(stateCount, j); float centerI = crossCheckVertical(i, (int) centerJ, 2 * stateCount[1], stateCountTotal); if (!Float.isNaN(centerI)) { float estimatedModuleSize = (stateCount[0] + stateCount[1] + stateCount[2]) / 3.0f; for (AlignmentPattern center : possibleCenters) { // Look for about the same center and module size: if (center.aboutEquals(estimatedModuleSize, centerI, centerJ)) { return center.combineEstimate(centerI, centerJ, estimatedModuleSize); } } // Hadn't found this before; save it AlignmentPattern point = new AlignmentPattern(centerJ, centerI, estimatedModuleSize); possibleCenters.add(point); if (resultPointCallback != null) { resultPointCallback.foundPossibleResultPoint(point); } } return null; }
3.68
hudi_HoodieMetaSyncOperations_dropTable
/** * Drop table from metastore. */ default void dropTable(String tableName) { }
3.68
hbase_MemStoreLABImpl_incScannerCount
/** * Called when opening a scanner on the data of this MemStoreLAB */ @Override public void incScannerCount() { this.refCnt.retain(); }
3.68
hadoop_RouterAuditLogger_createStringBuilderForSuccessEvent
/** * A helper function for creating the common portion of a successful * log message. */ private static StringBuilder createStringBuilderForSuccessEvent(String user, String operation, String target) { StringBuilder b = new StringBuilder(); start(Keys.USER, user, b); addRemoteIP(b); add(Keys.OPERATION, operation, b); add(Keys.TARGET, target, b); add(Keys.RESULT, AuditConstants.SUCCESS, b); return b; }
3.68
druid_ZookeeperNodeListener_refresh
/** * Build Properties from PathChildrenCache. * Should be called after init(). * * @see #getPropertiesFromCache() */ @Override public List<NodeEvent> refresh() { lock.lock(); try { Properties properties = getPropertiesFromCache(); List<NodeEvent> events = NodeEvent.getEventsByDiffProperties(getProperties(), properties); if (events != null && !events.isEmpty()) { setProperties(properties); } return events; } finally { lock.unlock(); } }
3.68
flink_KubernetesStateHandleStore_replaceEntry
/** * Replace the entry in the ConfigMap. If the entry already exists and contains delete marker, * we treat it as non-existent and perform the best effort removal. */ private Optional<KubernetesConfigMap> replaceEntry( KubernetesConfigMap configMap, String key, byte[] serializedStateHandle, AtomicReference<RetrievableStateHandle<T>> oldStateHandleRef) throws NotExistException { final String content = configMap.getData().get(key); if (content != null) { try { final StateHandleWithDeleteMarker<T> stateHandle = deserializeStateHandle(content); oldStateHandleRef.set(stateHandle.getInner()); if (stateHandle.isMarkedForDeletion()) { final NotExistException exception = getKeyNotExistException(key); try { // Try to finish the removal. We don't really care whether this succeeds or // not, from the "replace" point of view, the entry doesn't exist. releaseAndTryRemove(key); } catch (Exception e) { exception.addSuppressed(e); } throw exception; } } catch (IOException e) { // Just log the invalid entry, it will be removed by the update code path below. logInvalidEntry(key, configMapName, e); } configMap.getData().put(key, toBase64(serializedStateHandle)); return Optional.of(configMap); } throw getKeyNotExistException(key); }
3.68
pulsar_PulsarClientImplementationBinding_getBytes
/** * Retrieves ByteBuffer data into byte[]. * * @param byteBuffer * @return */ static byte[] getBytes(ByteBuffer byteBuffer) { if (byteBuffer == null) { return null; } if (byteBuffer.hasArray() && byteBuffer.arrayOffset() == 0 && byteBuffer.array().length == byteBuffer.remaining()) { return byteBuffer.array(); } // Direct buffer is not backed by array and it needs to be read from direct memory byte[] array = new byte[byteBuffer.remaining()]; byteBuffer.get(array); return array; }
3.68
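The fast path above returns the backing array only when it exactly covers the remaining bytes; direct buffers never qualify and are copied out instead. A minimal sketch of that copy path:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

public class ByteBufferCopyDemo {
    public static void main(String[] args) {
        ByteBuffer direct = ByteBuffer.allocateDirect(4);
        direct.put(new byte[] {1, 2, 3, 4}).flip();
        // Direct buffers are typically not array-backed (hasArray() is false),
        // so the remaining bytes must be copied out via get():
        byte[] copy = new byte[direct.remaining()];
        direct.get(copy);
        System.out.println(Arrays.toString(copy)); // [1, 2, 3, 4]
    }
}
```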
flink_StringColumnSummary_getMinLength
/** Shortest String length. */ public Integer getMinLength() { return minLength; }
3.68
hadoop_SignerFactory_registerSigner
/** * Register an implementation class for the given signer type. * * @param signerType The name of the signer type to register. * @param signerClass The class implementing the given signature protocol. */ public static void registerSigner( final String signerType, final Class<? extends Signer> signerClass) { checkArgument(signerType != null, "signerType cannot be null"); checkArgument(signerClass != null, "signerClass cannot be null"); SIGNERS.put(signerType, signerClass); }
3.68
hbase_WALKeyImpl_getNonce
/** Returns The nonce */ @Override public long getNonce() { return nonce; }
3.68
flink_MutableHashTable_open
/** * Opens the hash join. This method reads the build-side input and constructs the initial hash * table, gradually spilling partitions that do not fit into memory. * * @param buildSide Build side input. * @param probeSide Probe side input. * @param buildOuterJoin Whether outer join on build side. * @throws IOException Thrown, if an I/O problem occurs while spilling a partition. */ public void open( final MutableObjectIterator<BT> buildSide, final MutableObjectIterator<PT> probeSide, boolean buildOuterJoin) throws IOException { this.buildSideOuterJoin = buildOuterJoin; // sanity checks if (!this.closed.compareAndSet(true, false)) { throw new IllegalStateException( "Hash Join cannot be opened, because it is currently not closed."); } // grab the write behind buffers first for (int i = this.numWriteBehindBuffers; i > 0; --i) { this.writeBehindBuffers.add( this.availableMemory.remove(this.availableMemory.size() - 1)); } // open builds the initial table by consuming the build-side input this.currentRecursionDepth = 0; buildInitialTable(buildSide); // the first prober is the probe-side input this.probeIterator = new ProbeIterator<PT>(probeSide, this.probeSideSerializer.createInstance()); // the bucket iterator can remain constant over the time this.bucketIterator = new HashBucketIterator<BT, PT>( this.buildSideSerializer, this.recordComparator, probedSet, buildOuterJoin); }
3.68
flink_DynamicSinkUtils_prepareDynamicSink
/** * Prepares the given {@link DynamicTableSink}. It checks whether the sink is compatible with the * INSERT INTO clause and applies initial parameters. */ private static void prepareDynamicSink( String tableDebugName, Map<String, String> staticPartitions, boolean isOverwrite, DynamicTableSink sink, ResolvedCatalogTable table, List<SinkAbilitySpec> sinkAbilitySpecs) { validatePartitioning(tableDebugName, staticPartitions, sink, table.getPartitionKeys()); validateAndApplyOverwrite(tableDebugName, isOverwrite, sink, sinkAbilitySpecs); validateAndApplyMetadata(tableDebugName, sink, table.getResolvedSchema(), sinkAbilitySpecs); }
3.68
hbase_Classes_extendedForName
/** * Equivalent of {@link Class#forName(String)} which also returns classes for primitives like * <code>boolean</code>, etc. * @param className The name of the class to retrieve. Can be either a normal class or a * primitive class. * @return The class specified by <code>className</code>. * @throws ClassNotFoundException If the requested class can not be found. */ public static Class<?> extendedForName(String className) throws ClassNotFoundException { Class<?> valueType; if (className.equals("boolean")) { valueType = boolean.class; } else if (className.equals("byte")) { valueType = byte.class; } else if (className.equals("short")) { valueType = short.class; } else if (className.equals("int")) { valueType = int.class; } else if (className.equals("long")) { valueType = long.class; } else if (className.equals("float")) { valueType = float.class; } else if (className.equals("double")) { valueType = double.class; } else if (className.equals("char")) { valueType = char.class; } else { valueType = Class.forName(className); } return valueType; }
3.68
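The special-casing exists because Class.forName does not resolve primitive names; a quick demonstration of the asymmetry the method papers over:

```java
public class PrimitiveForNameDemo {
    public static void main(String[] args) {
        System.out.println(int.class.getName()); // prints "int"
        try {
            Class.forName("int"); // primitive names are not resolvable this way
        } catch (ClassNotFoundException e) {
            System.out.println("Class.forName(\"int\") fails: " + e);
        }
    }
}
```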
dubbo_ReferenceConfig_aggregateUrlFromRegistry
/** * Get URLs from the registry and aggregate them. */ private void aggregateUrlFromRegistry(Map<String, String> referenceParameters) { checkRegistry(); List<URL> us = ConfigValidationUtils.loadRegistries(this, false); if (CollectionUtils.isNotEmpty(us)) { for (URL u : us) { URL monitorUrl = ConfigValidationUtils.loadMonitor(this, u); if (monitorUrl != null) { u = u.putAttribute(MONITOR_KEY, monitorUrl); } u = u.setScopeModel(getScopeModel()); u = u.setServiceModel(consumerModel); if (isInjvm() != null && isInjvm()) { u = u.addParameter(LOCAL_PROTOCOL, true); } urls.add(u.putAttribute(REFER_KEY, referenceParameters)); } } if (urls.isEmpty() && shouldJvmRefer(referenceParameters)) { URL injvmUrl = new URL(LOCAL_PROTOCOL, LOCALHOST_VALUE, 0, interfaceClass.getName()) .addParameters(referenceParameters); injvmUrl = injvmUrl.setScopeModel(getScopeModel()); injvmUrl = injvmUrl.setServiceModel(consumerModel); urls.add(injvmUrl.putAttribute(REFER_KEY, referenceParameters)); } if (urls.isEmpty()) { throw new IllegalStateException("No such any registry to reference " + interfaceName + " on the consumer " + NetUtils.getLocalHost() + " use dubbo version " + Version.getVersion() + ", please config <dubbo:registry address=\"...\" /> to your spring config."); } }
3.68
framework_VAbstractCalendarPanel_focusPreviousMonth
/** * Selects the previous month */ @SuppressWarnings("deprecation") private void focusPreviousMonth() { if (focusedDate == null) { return; } Date requestedPreviousMonthDate = (Date) focusedDate.clone(); removeOneMonth(requestedPreviousMonthDate); if (!isDateInsideRange(requestedPreviousMonthDate, getResolution(this::isMonth))) { return; } if (!isDateInsideRange(requestedPreviousMonthDate, getResolution(this::isDay))) { requestedPreviousMonthDate = adjustDateToFitInsideRange( requestedPreviousMonthDate); } focusedDate.setTime(requestedPreviousMonthDate.getTime()); displayedMonth.setMonth(displayedMonth.getMonth() - 1); renderCalendar(); }
3.68
hadoop_AbfsTokenRenewer_isManaged
/** * Checks if passed token is managed. * * @param token the token being checked * @return true if it is managed. * @throws IOException thrown when evaluating if token is managed. */ @Override public boolean isManaged(Token<?> token) throws IOException { return true; }
3.68
flink_ResultPartitionType_isPipelinedOrPipelinedBoundedResultPartition
/** * {@link #isPipelinedOrPipelinedBoundedResultPartition()} is used to judge whether it is the * specified {@link #PIPELINED} or {@link #PIPELINED_BOUNDED} resultPartitionType. * * <p>This method is suitable for judgment conditions related to the specific implementation of * {@link ResultPartitionType}. * * <p>This method is not related to data consumption and partition release. For the logic * related to partition release, use {@link #isReleaseByScheduler()} instead, and for the consumption * type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead. */ public boolean isPipelinedOrPipelinedBoundedResultPartition() { return this == PIPELINED || this == PIPELINED_BOUNDED; }
3.68
querydsl_GenericExporter_setNamePrefix
/** * Set the name prefix * * @param prefix */ public void setNamePrefix(String prefix) { codegenModule.bind(CodegenModule.PREFIX, prefix); }
3.68
flink_Task_deliverOperatorEvent
/** * Dispatches an operator event to the invokable task. * * <p>If the event delivery did not succeed, this method throws an exception. Callers can use * that exception for error reporting, but need not react with failing this task (this method * takes care of that). * * @throws FlinkException This method throws exceptions indicating the reason why delivery did * not succeed. */ public void deliverOperatorEvent(OperatorID operator, SerializedValue<OperatorEvent> evt) throws FlinkException { final TaskInvokable invokable = this.invokable; final ExecutionState currentState = this.executionState; if (invokable == null || (currentState != ExecutionState.RUNNING && currentState != ExecutionState.INITIALIZING)) { throw new TaskNotRunningException("Task is not running, but in state " + currentState); } if (invokable instanceof CoordinatedTask) { try { ((CoordinatedTask) invokable).dispatchOperatorEvent(operator, evt); } catch (Throwable t) { ExceptionUtils.rethrowIfFatalErrorOrOOM(t); if (getExecutionState() == ExecutionState.RUNNING || getExecutionState() == ExecutionState.INITIALIZING) { FlinkException e = new FlinkException("Error while handling operator event", t); failExternally(e); throw e; } } } }
3.68
framework_ContainerEventProvider_ignoreContainerEvents
/** * Removes listeners from the container so no events are processed */ private void ignoreContainerEvents() { if (container instanceof ItemSetChangeNotifier) { ((ItemSetChangeNotifier) container) .removeItemSetChangeListener(this); } if (container instanceof ValueChangeNotifier) { ((ValueChangeNotifier) container).removeValueChangeListener(this); } }
3.68
hadoop_S3ListResult_representsEmptyDirectory
/**
 * Does this listing represent an empty directory?
 * @param dirKey directory key
 * @return true if the list is considered empty.
 */
public boolean representsEmptyDirectory(final String dirKey) {
    // For an empty directory, the marker must exist but have no children.
    // So the listing must contain the marker entry only as an object,
    // and the list of common prefixes must be empty.
    List<String> keys = objectKeys();
    return keys.size() == 1
            && keys.contains(dirKey)
            && getCommonPrefixes().isEmpty();
}
3.68
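A tiny self-contained illustration of the marker-plus-no-children rule above; the listing contents are hypothetical:

import java.util.Collections;
import java.util.List;

public class EmptyDirCheckExample {
    // Stand-in for the listing result: object keys plus common prefixes.
    static boolean representsEmptyDirectory(List<String> keys, List<String> prefixes,
            String dirKey) {
        return keys.size() == 1 && keys.contains(dirKey) && prefixes.isEmpty();
    }

    public static void main(String[] args) {
        // Marker object only, no child objects or prefixes: empty directory.
        System.out.println(representsEmptyDirectory(
                Collections.singletonList("data/dir/"),
                Collections.emptyList(), "data/dir/")); // true
        // Marker plus a child object: not empty.
        System.out.println(representsEmptyDirectory(
                List.of("data/dir/", "data/dir/file.csv"),
                Collections.emptyList(), "data/dir/")); // false
    }
}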
graphhopper_Entity_getStringField
/** @return the given column from the current row as a deduplicated String. */
protected String getStringField(String column, boolean required) throws IOException {
    return getFieldCheckRequired(column, required);
}
3.68
open-banking-gateway_PaymentAccessFactory_paymentForAnonymousPsu
/**
 * Creates a {@code PaymentAccess} object that is similar to a consent, facing an anonymous
 * (to OBG) user and ASPSP pair.
 * @param fintech Fintech that initiates the payment
 * @param aspsp ASPSP/Bank that is going to perform the payment
 * @param session Session that identifies the payment.
 * @return Payment context to authorize
 */
public PaymentAccess paymentForAnonymousPsu(Fintech fintech, Bank aspsp, ServiceSession session) {
    return new AnonymousPsuPaymentAccess(aspsp, fintech, fintechPubKeys, psuEncryption,
            session, paymentRepository);
}
3.68
framework_JsonCodec_valueChanged
/**
 * Compares the value with the reference. If they match, returns false.
 *
 * @param fieldValue the current value; a {@link JsonNull} is treated as {@code null}
 * @param referenceValue the reference value to compare against
 * @return true if the value differs from the reference, false otherwise
 */
private static boolean valueChanged(JsonValue fieldValue, JsonValue referenceValue) {
    if (fieldValue instanceof JsonNull) {
        fieldValue = null;
    }

    if (fieldValue == referenceValue) {
        return false;
    } else if (fieldValue == null || referenceValue == null) {
        return true;
    } else {
        return !jsonEquals(fieldValue, referenceValue);
    }
}
3.68
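The null-normalization pattern above can be shown with plain objects; this is a simplified analog, not the Vaadin implementation:

import java.util.Objects;

public class NullTolerantDiff {
    // Same shape as valueChanged above, over plain Objects: an explicit
    // null marker would first be normalized to a real null before comparing.
    static boolean valueChanged(Object fieldValue, Object referenceValue) {
        if (fieldValue == referenceValue) {
            return false; // same instance, including both null: unchanged
        }
        if (fieldValue == null || referenceValue == null) {
            return true;  // exactly one side is null: changed
        }
        return !Objects.equals(fieldValue, referenceValue);
    }

    public static void main(String[] args) {
        System.out.println(valueChanged(null, null)); // false
        System.out.println(valueChanged("a", null));  // true
        System.out.println(valueChanged("a", "a"));   // false
    }
}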
flink_AbstractTopNFunction_setKeyContext
/**
 * Sets keyContext to RankFunction.
 *
 * @param keyContext keyContext of current function.
 */
public void setKeyContext(KeyContext keyContext) {
    this.keyContext = keyContext;
}
3.68
hadoop_BulkDeleteRetryHandler_incrementStatistic
/**
 * Increment a statistic by a specific value.
 * This increments the instrumentation counter.
 * @param statistic The operation to increment
 * @param count the count to increment
 */
protected void incrementStatistic(Statistic statistic, long count) {
    instrumentation.incrementCounter(statistic, count);
}
3.68
framework_InfoSection_show
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.client.debug.internal.Section#show()
 */
@Override
public void show() {
    refresh();
}
3.68
framework_VaadinSession_createConnectorId
/**
 * Generate an id for the given Connector. Connectors must not call this
 * method more than once, the first time they need an id.
 *
 * @param connector
 *            A connector that has not yet been assigned an id.
 * @return A new id for the connector
 *
 * @deprecated As of 7.0. Use
 *             {@link VaadinService#generateConnectorId(VaadinSession, ClientConnector)}
 *             instead.
 */
@Deprecated
public String createConnectorId(ClientConnector connector) {
    return service.generateConnectorId(this, connector);
}
3.68
flink_ParquetColumnarRowSplitReader_seekToRow
/** Seek to a particular row number. */
public void seekToRow(long rowCount) throws IOException {
    if (totalCountLoadedSoFar != 0) {
        throw new UnsupportedOperationException(
                "Seek is only supported before any records are read.");
    }

    List<BlockMetaData> blockMetaData = reader.getRowGroups();

    for (BlockMetaData metaData : blockMetaData) {
        if (metaData.getRowCount() > rowCount) {
            break;
        } else {
            reader.skipNextRowGroup();
            rowsReturned += metaData.getRowCount();
            totalCountLoadedSoFar += metaData.getRowCount();
            rowsInBatch = (int) metaData.getRowCount();
            nextRow = (int) metaData.getRowCount();
            rowCount -= metaData.getRowCount();
        }
    }

    for (int i = 0; i < rowCount; i++) {
        boolean end = reachedEnd();
        if (end) {
            throw new RuntimeException("Seeked past the last row of the input.");
        }
        nextRecord();
    }
}
3.68
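The row-group arithmetic above (skip whole groups while they fit entirely before the target, then step through the remainder record by record) in an isolated sketch with hypothetical group sizes:

public class RowGroupSeekSketch {
    public static void main(String[] args) {
        long[] groupRowCounts = {100, 100, 50}; // hypothetical row-group sizes
        long target = 230;                      // row we want to seek to

        long remaining = target;
        int group = 0;
        // Skip whole groups while they fit entirely before the target row.
        while (group < groupRowCounts.length && groupRowCounts[group] <= remaining) {
            remaining -= groupRowCounts[group];
            group++;
        }
        // Prints: start in group 2, then read 30 records one by one
        System.out.println("start in group " + group
                + ", then read " + remaining + " records one by one");
    }
}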
dubbo_DubboNamespaceHandler_registerAnnotationConfigProcessors
/**
 * Register the processors for the Spring Annotation-Driven features.
 *
 * @param registry {@link BeanDefinitionRegistry}
 * @see AnnotationConfigUtils
 * @since 2.7.5
 */
private void registerAnnotationConfigProcessors(BeanDefinitionRegistry registry) {
    AnnotationConfigUtils.registerAnnotationConfigProcessors(registry);
}
3.68
framework_ColorPickerPopup_setValue
/**
 * Sets the value of this object. If the new value is not equal to
 * {@code getValue()}, fires a {@link ValueChangeEvent}. Throws
 * {@code NullPointerException} if the value is null.
 *
 * @param color
 *            the new value, not {@code null}
 * @throws NullPointerException
 *             if {@code color} is {@code null}
 */
@Override
public void setValue(Color color) {
    Objects.requireNonNull(color, "color cannot be null");
    previouslySelectedColor = selectedColor;
    selectedColor = color;
    hsvGradient.setValue(selectedColor);
    hsvPreview.setValue(selectedColor);
    rgbGradient.setValue(selectedColor);
    rgbPreview.setValue(selectedColor);
    selPreview.setValue(selectedColor);
}
3.68
pulsar_ResourceUsageTopicTransportManager_registerResourceUsagePublisher
/**
 * Register a resource owner (resource-group, tenant, namespace, topic etc).
 *
 * @param r the resource usage publisher
 */
public void registerResourceUsagePublisher(ResourceUsagePublisher r) {
    publisherMap.put(r.getID(), r);
}
3.68
starts_Loadables_findUnreached
/**
 * This method takes (i) the dependencies that jdeps found and (ii) the map from tests to reachable
 * types in the graph, and uses these to find types that jdeps found but which are not reachable by
 * any test.
 * @param deps The dependencies that jdeps found.
 * @param testDeps The map from test to types that can be reached in the graph.
 * @return The set of types that are not reachable by any test in the graph.
 */
private Set<String> findUnreached(Map<String, Set<String>> deps, Map<String, Set<String>> testDeps) {
    Set<String> allClasses = new HashSet<>();
    for (String loc : deps.keySet()) {
        // 1. jdeps finds no dependencies for a class if the class' dependencies were not analyzed (e.g., no -R)
        // 2. every class in the CUT has non-empty jdeps dependencies; they, at least, depend on java.lang.Object
        // 3. isWellKnownUrl will ignore classes from junit, hamcrest, maven, etc; we don't want to track those
        // 4. isIgnorableInternalName will ignore classes from the standard library, mockito, jacoco
        String className = ChecksumUtil.toClassName(loc);
        if (!deps.get(loc).isEmpty() || !ChecksumUtil.isWellKnownUrl(className)
                || !Types.isIgnorableInternalName(className)) {
            // this means that this is a class we want to track, either because it is in the CUT
            // or in some jar that we are tracking
            allClasses.add(loc);
        }
    }
    LOGGER.log(Level.INFO, "ALL(count): " + allClasses.size());
    Set<String> reached = new HashSet<>(testDeps.keySet());
    for (String test : testDeps.keySet()) {
        reached.addAll(testDeps.get(test));
    }
    // remove the reached classes from allClasses to get the unreached classes
    allClasses.removeAll(reached);
    return allClasses;
}
3.68
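The core of findUnreached is a set difference; here it is in isolation, with hypothetical class and test names:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class UnreachedSketch {
    public static void main(String[] args) {
        // Hypothetical data: all tracked types, and the types each test reaches.
        Set<String> allClasses = new HashSet<>(Set.of("A", "B", "C", "D"));
        Map<String, Set<String>> testDeps = Map.of("TestA", Set.of("A", "B"));

        // Tests themselves count as reached, plus everything they can reach.
        Set<String> reached = new HashSet<>(testDeps.keySet());
        testDeps.values().forEach(reached::addAll);

        allClasses.removeAll(reached); // whatever is left was never reached
        System.out.println(allClasses); // C and D (iteration order unspecified)
    }
}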
hbase_StripeStoreFileManager_removeCompactedFiles
/**
 * Remove compacted files.
 */
private void removeCompactedFiles() {
    for (HStoreFile oldFile : this.compactedFiles) {
        byte[] oldEndRow = endOf(oldFile);
        List<HStoreFile> source = null;
        if (isInvalid(oldEndRow)) {
            source = getLevel0Copy();
        } else {
            int stripeIndex = findStripeIndexByEndRow(oldEndRow);
            if (stripeIndex < 0) {
                throw new IllegalStateException(
                        "An allegedly compacted file [" + oldFile + "] does not belong"
                                + " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])");
            }
            source = getStripeCopy(stripeIndex);
        }
        if (!source.remove(oldFile)) {
            LOG.warn("An allegedly compacted file [{}] was not found", oldFile);
        }
    }
}
3.68
framework_VCalendarPanel_isDateInsideRange
/**
 * Checks inclusively whether a date is inside a range of dates or not.
 *
 * @param date the date to check
 * @param minResolution the minimum resolution used when comparing against the range bounds
 * @return true if the date is inside the range, false otherwise
 */
private boolean isDateInsideRange(Date date, Resolution minResolution) {
    assert (date != null);

    return isAcceptedByRangeEnd(date, minResolution)
            && isAcceptedByRangeStart(date, minResolution);
}
3.68
zxing_BinaryBitmap_isCropSupported
/**
 * @return Whether this bitmap can be cropped.
 */
public boolean isCropSupported() {
    return binarizer.getLuminanceSource().isCropSupported();
}
3.68
flink_KvStateRegistry_getKvState
/**
 * Returns the {@link KvStateEntry} containing the requested instance as identified by the
 * given KvStateID, along with its {@link KvStateInfo} or <code>null</code> if none is
 * registered.
 *
 * @param kvStateId KvStateID to identify the KvState instance
 * @return The {@link KvStateEntry} instance identified by the KvStateID or <code>null</code>
 *     if there is none
 */
public KvStateEntry<?, ?, ?> getKvState(KvStateID kvStateId) {
    return registeredKvStates.get(kvStateId);
}
3.68
flink_BinaryStringDataUtil_hash
/** Calculate the hash value of a given string using {@link MessageDigest}. */
public static BinaryStringData hash(BinaryStringData str, MessageDigest md) {
    return hash(str.toBytes(), md);
}
3.68
zilla_HpackContext_staticIndex15
// Index in static table for the given name of length 15
private static int staticIndex15(DirectBuffer name)
{
    switch (name.getByte(14))
    {
    case 'e':
        if (STATIC_TABLE[17].name.equals(name)) // accept-language
        {
            return 17;
        }
        break;
    case 'g':
        if (STATIC_TABLE[16].name.equals(name)) // accept-encoding
        {
            return 16;
        }
        break;
    }
    return -1;
}
3.68
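The technique above dispatches on a single discriminating byte, then confirms with a full comparison. A self-contained version over plain byte arrays; the table contents are the two 15-byte HPACK static-table names the method distinguishes, with their indices taken from the snippet:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class LastByteDispatch {
    static final byte[] ACCEPT_LANGUAGE = "accept-language".getBytes(StandardCharsets.US_ASCII);
    static final byte[] ACCEPT_ENCODING = "accept-encoding".getBytes(StandardCharsets.US_ASCII);

    // Dispatch on the last byte, then confirm with a full equality check.
    static int staticIndex15(byte[] name) {
        switch (name[14]) {
            case 'e':
                if (Arrays.equals(ACCEPT_LANGUAGE, name)) {
                    return 17;
                }
                break;
            case 'g':
                if (Arrays.equals(ACCEPT_ENCODING, name)) {
                    return 16;
                }
                break;
        }
        return -1;
    }

    public static void main(String[] args) {
        System.out.println(staticIndex15(
                "accept-encoding".getBytes(StandardCharsets.US_ASCII))); // 16
    }
}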
hadoop_RequestFactoryImpl_withStorageClass
/**
 * Storage class.
 * @param value new value
 * @return the builder
 */
public RequestFactoryBuilder withStorageClass(final StorageClass value) {
    storageClass = value;
    return this;
}
3.68
hudi_CleanTask_newBuilder
/**
 * Utility to create builder for {@link CleanTask}.
 *
 * @return Builder for {@link CleanTask}.
 */
public static Builder newBuilder() {
    return new Builder();
}
3.68
graphhopper_DistanceCalcEarth_calcNormalizedDist
/**
 * Converts the specified length in meters into a normalized distance.
 */
@Override
public double calcNormalizedDist(double dist) {
    double tmp = sin(dist / 2 / R);
    return tmp * tmp;
}
3.68
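The normalized form is sin²(d / 2R), which is cheap to compare against other normalized distances without the asin/sqrt needed to recover meters. A self-contained round-trip sketch; the radius constant and the inverse helper here are assumptions for illustration:

public class NormalizedDistExample {
    static final double R = 6371000; // assumed mean earth radius in meters

    // sin^2(d / 2R): the haversine "normalized" form of a distance d in meters
    static double calcNormalizedDist(double dist) {
        double tmp = Math.sin(dist / 2 / R);
        return tmp * tmp;
    }

    // Inverse: back from the normalized form to meters.
    static double calcDenormalizedDist(double normedDist) {
        return 2 * R * Math.asin(Math.sqrt(normedDist));
    }

    public static void main(String[] args) {
        double meters = 1234.5;
        double norm = calcNormalizedDist(meters);
        System.out.println(norm);                       // tiny value, cheap to compare
        System.out.println(calcDenormalizedDist(norm)); // ~1234.5 again
    }
}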
flink_TemplateUtils_findResultOnlyTemplate
/** Hints that only declare a result (either accumulator or output). */
static @Nullable FunctionResultTemplate findResultOnlyTemplate(
        Set<FunctionResultTemplate> globalResultOnly,
        Set<FunctionResultTemplate> localResultOnly,
        Set<FunctionTemplate> explicitMappings,
        Function<FunctionTemplate, FunctionResultTemplate> accessor,
        String hintType) {
    final Set<FunctionResultTemplate> resultOnly =
            Stream.concat(globalResultOnly.stream(), localResultOnly.stream())
                    .collect(Collectors.toCollection(LinkedHashSet::new));

    final Set<FunctionResultTemplate> allResults =
            Stream.concat(resultOnly.stream(), explicitMappings.stream().map(accessor))
                    .collect(Collectors.toCollection(LinkedHashSet::new));

    if (resultOnly.size() == 1 && allResults.size() == 1) {
        return resultOnly.stream().findFirst().orElse(null);
    }

    // differing results are only fine as long as they come from a mapping
    if (resultOnly.size() > 1 || (!resultOnly.isEmpty() && !explicitMappings.isEmpty())) {
        throw extractionError(
                String.format(
                        "%s hints that lead to ambiguous results are not allowed.", hintType));
    }
    return null;
}
3.68
hbase_ConcurrentMapUtils_computeIfAbsentEx
/**
 * In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if
 * the value already exists. So here we copy the implementation of
 * {@link ConcurrentMap#computeIfAbsent(Object, java.util.function.Function)}. It uses get and
 * putIfAbsent to implement computeIfAbsent. And notice that the implementation does not
 * guarantee that the supplier will only be executed once.
 */
public static <K, V> V computeIfAbsentEx(ConcurrentMap<K, V> map, K key,
        IOExceptionSupplier<V> supplier) throws IOException {
    V v, newValue;
    return ((v = map.get(key)) == null && (newValue = supplier.get()) != null
            && (v = map.putIfAbsent(key, newValue)) == null) ? newValue : v;
}
3.68
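A self-contained sketch of the get-then-putIfAbsent fast path above, with a local stand-in for HBase's IOExceptionSupplier. Under contention the supplier may run more than once, but only one result wins and all callers see the same value:

import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ComputeIfAbsentExExample {
    // Stand-in for HBase's IOExceptionSupplier: a supplier that may throw IOException.
    interface IOExceptionSupplier<V> {
        V get() throws IOException;
    }

    // get first (fast path); putIfAbsent only on a miss.
    static <K, V> V computeIfAbsentEx(ConcurrentMap<K, V> map, K key,
            IOExceptionSupplier<V> supplier) throws IOException {
        V v, newValue;
        return ((v = map.get(key)) == null
                && (newValue = supplier.get()) != null
                && (v = map.putIfAbsent(key, newValue)) == null) ? newValue : v;
    }

    public static void main(String[] args) throws IOException {
        ConcurrentMap<String, Long> cache = new ConcurrentHashMap<>();
        long a = computeIfAbsentEx(cache, "k", () -> System.nanoTime()); // supplier runs
        long b = computeIfAbsentEx(cache, "k", () -> System.nanoTime()); // fast get path
        System.out.println(a == b); // true
    }
}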
morf_SchemaAdapter_getView
/**
 * @see org.alfasoftware.morf.metadata.Schema#getView(java.lang.String)
 */
@Override
public View getView(String name) {
    return delegate.getView(name);
}
3.68
hbase_ExampleMasterObserverWithMetrics_getMaxMemory
/** Returns the max memory of the process. We will use this to define a gauge metric. */
private long getMaxMemory() {
    return Runtime.getRuntime().maxMemory();
}
3.68
hibernate-validator_GroupSequenceCheck_redefinesDefaultGroupSequence
/**
 * Check if the given {@link TypeMirror} redefines the default group sequence for the annotated class.
 * <p>
 * Note that it is only the case if the annotated element is a class.
 */
private boolean redefinesDefaultGroupSequence(TypeElement annotatedElement, TypeMirror typeMirror) {
    return ElementKind.CLASS.equals( annotatedElement.getKind() )
            && typeUtils.isSameType( annotatedElement.asType(), typeMirror );
}
3.68
flink_CloseableIterable_empty
/** Returns an empty iterator. */
static <T> CloseableIterable<T> empty() {
    return new CloseableIterable.Empty<>();
}
3.68
flink_CsvReader_tupleType
/**
 * Configures the reader to read the CSV data and parse it to the given type. The type must be
 * a subclass of {@link Tuple}. The type information for the fields is obtained from the type
 * class. The type consequently needs to specify all generic field types of the tuple.
 *
 * @param targetType The class of the target type, needs to be a subclass of Tuple.
 * @return The DataSet representing the parsed CSV data.
 */
public <T extends Tuple> DataSource<T> tupleType(Class<T> targetType) {
    Preconditions.checkNotNull(targetType, "The target type class must not be null.");
    if (!Tuple.class.isAssignableFrom(targetType)) {
        throw new IllegalArgumentException(
                "The target type must be a subclass of " + Tuple.class.getName());
    }

    @SuppressWarnings("unchecked")
    TupleTypeInfo<T> typeInfo = (TupleTypeInfo<T>) TypeExtractor.createTypeInfo(targetType);
    CsvInputFormat<T> inputFormat =
            new TupleCsvInputFormat<T>(
                    path, this.lineDelimiter, this.fieldDelimiter, typeInfo, this.includedMask);

    Class<?>[] classes = new Class<?>[typeInfo.getArity()];
    for (int i = 0; i < typeInfo.getArity(); i++) {
        classes[i] = typeInfo.getTypeAt(i).getTypeClass();
    }

    configureInputFormat(inputFormat);

    return new DataSource<T>(
            executionContext, inputFormat, typeInfo, Utils.getCallLocationName());
}
3.68
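A hedged usage sketch for the batch CsvReader API above, reached via ExecutionEnvironment.readCsvFile in Flink's legacy DataSet API; the input path and field types are assumptions for illustration:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class CsvTupleExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Parse each CSV line into a Tuple2<String, Integer>; types(...) is the
        // tuple shorthand that resolves to the tupleType machinery shown above.
        DataSet<Tuple2<String, Integer>> data = env
                .readCsvFile("file:///tmp/example.csv") // hypothetical input path
                .types(String.class, Integer.class);

        data.print();
    }
}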