name: string (length 12 – 178)
code_snippet: string (length 8 – 36.5k)
score: float64 (range 3.26 – 3.68)
framework_VTree_getState
/** For internal use only. May be removed or replaced in the future. */ public boolean getState() { return open; }
3.68
flink_OptimizerNode_getOperator
/** * Gets the operator represented by this optimizer node. * * @return This node's operator. */ public Operator<?> getOperator() { return this.operator; }
3.68
pulsar_AbstractSchema_atSchemaVersion
/** * Return an instance of this schema at the given version. * @param schemaVersion the version * @return the schema at that specific version * @throws SchemaSerializationException in case of unknown schema version * @throws NullPointerException in case of null schemaVersion and supportSchemaVersioning is true */ public Schema<?> atSchemaVersion(byte[] schemaVersion) throws SchemaSerializationException { if (!supportSchemaVersioning()) { return this; } Objects.requireNonNull(schemaVersion); throw new SchemaSerializationException("Not implemented for " + this.getClass()); }
3.68
hbase_HBaseMetrics2HadoopMetricsAdapter_addMeter
/** * Add Dropwizard-Metrics rate information to a Hadoop-Metrics2 record builder, converting the * rates to the appropriate unit. * @param builder A Hadoop-Metrics2 record builder. * @param name A base name for this record. * @param meter The Dropwizard-Metrics meter providing the count and rates. */ private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { builder.addGauge(Interns.info(name + "_count", EMPTY_STRING), meter.getCount()); builder.addGauge(Interns.info(name + "_mean_rate", EMPTY_STRING), meter.getMeanRate()); builder.addGauge(Interns.info(name + "_1min_rate", EMPTY_STRING), meter.getOneMinuteRate()); builder.addGauge(Interns.info(name + "_5min_rate", EMPTY_STRING), meter.getFiveMinuteRate()); builder.addGauge(Interns.info(name + "_15min_rate", EMPTY_STRING), meter.getFifteenMinuteRate()); }
3.68
hadoop_TimelineCollectorWebService_putEntities
/** * Accepts writes to the collector, and returns a response. It simply routes * the request to the app level collector. It expects an application as a * context. * * @param req Servlet request. * @param res Servlet response. * @param async flag indicating whether it's an async put or not. "true" * indicates an async call; if null, it's considered false. * @param appId Application Id to which the entities to be put belong. If * appId is not there or it cannot be parsed, HTTP 400 will be sent back. * @param entities timeline entities to be put. * @return a Response with appropriate HTTP status. */ @PUT @Path("/entities") @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */}) public Response putEntities( @Context HttpServletRequest req, @Context HttpServletResponse res, @QueryParam("async") String async, @QueryParam("subappwrite") String isSubAppEntities, @QueryParam("appid") String appId, TimelineEntities entities) { init(res); UserGroupInformation callerUgi = getUser(req); boolean isAsync = async != null && async.trim().equalsIgnoreCase("true"); if (callerUgi == null) { String msg = "The owner of the posted timeline entities is not set"; LOG.error(msg); throw new ForbiddenException(msg); } long startTime = Time.monotonicNow(); boolean succeeded = false; try { ApplicationId appID = parseApplicationId(appId); if (appID == null) { return Response.status(Response.Status.BAD_REQUEST).build(); } NodeTimelineCollectorManager collectorManager = (NodeTimelineCollectorManager) context.getAttribute( NodeTimelineCollectorManager.COLLECTOR_MANAGER_ATTR_KEY); TimelineCollector collector = collectorManager.get(appID); if (collector == null) { LOG.error("Application: {} is not found", appId); throw new NotFoundException("Application: "+ appId + " is not found"); } if (isAsync) { collector.putEntitiesAsync(processTimelineEntities(entities, appId, Boolean.valueOf(isSubAppEntities)), callerUgi); } else { collector.putEntities(processTimelineEntities(entities, appId, Boolean.valueOf(isSubAppEntities)), callerUgi); } succeeded = true; return Response.ok().build(); } catch (NotFoundException | ForbiddenException e) { throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR); } catch (IOException e) { LOG.error("Error putting entities", e); throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR); } catch (Exception e) { LOG.error("Unexpected error while putting entities", e); throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR); } finally { long latency = Time.monotonicNow() - startTime; if (isAsync) { METRICS.addAsyncPutEntitiesLatency(latency, succeeded); } else { METRICS.addPutEntitiesLatency(latency, succeeded); } } }
3.68
framework_PropertysetItem_removeItemProperty
/** * Removes the Property identified by ID from the Item. This functionality * is optional. If the method is not implemented, the method always returns * <code>false</code>. * * @param id * the ID of the Property to be removed. * @return <code>true</code> if the operation succeeded, <code>false</code> * if not */ @Override public boolean removeItemProperty(Object id) { // Can't remove missing properties if (map.remove(id) == null) { return false; } list.remove(id); // Send change events fireItemPropertySetChange(); return true; }
3.68
hadoop_FileIoProvider_write
/** * {@inheritDoc}. */ @Override public void write(@Nonnull byte[] b, int off, int len) throws IOException { final long begin = profilingEventHook.beforeFileIo(volume, WRITE, len); try { faultInjectorEventHook.beforeFileIo(volume, WRITE, len); super.write(b, off, len); profilingEventHook.afterFileIo(volume, WRITE, begin, len); } catch(Exception e) { onFailure(volume, begin); throw e; } }
3.68
pulsar_AuthenticationProviderOpenID_authenticateAsync
/** * Authenticate the parameterized {@link AuthenticationDataSource} by verifying the issuer is an allowed issuer, * then retrieving the JWKS URI from the issuer, then retrieving the Public key from the JWKS URI, and finally * verifying the JWT signature and claims. * * @param authData - the authData passed by the Pulsar Broker containing the token. * @return the role, if the JWT is authenticated, otherwise a failed future. */ @Override public CompletableFuture<String> authenticateAsync(AuthenticationDataSource authData) { return authenticateTokenAsync(authData).thenApply(this::getRole); }
3.68
hadoop_OBSDataBlocks_createFactory
/** * Create a factory. * * @param owner factory owner * @param name factory name -the option from {@link OBSConstants}. * @return the factory, ready to be initialized. * @throws IllegalArgumentException if the name is unknown. */ static BlockFactory createFactory(final OBSFileSystem owner, final String name) { switch (name) { case OBSConstants.FAST_UPLOAD_BUFFER_ARRAY: return new ByteArrayBlockFactory(owner); case OBSConstants.FAST_UPLOAD_BUFFER_DISK: return new DiskBlockFactory(owner); case OBSConstants.FAST_UPLOAD_BYTEBUFFER: return new ByteBufferBlockFactory(owner); default: throw new IllegalArgumentException( "Unsupported block buffer" + " \"" + name + '"'); } }
3.68
querydsl_JPAExpressions_treat
/** * Create a JPA 2.1 treated path. * * @param path The path to apply the treat operation on * @param subtype subtype class * @param <U> the subtype class * @param <T> the expression type * @return subtype instance with the same identity */ public static <U extends BeanPath<? extends T>, T> U treat(BeanPath<? extends T> path, Class<U> subtype) { try { Class<? extends T> entitySubType = getConcreteEntitySubType(subtype); PathMetadata pathMetadata = new PathMetadata(path, getEntityName(entitySubType), PathType.TREATED_PATH); return subtype.getConstructor(PathMetadata.class).newInstance(pathMetadata); } catch (InstantiationException e) { throw new ExpressionException(e.getMessage(), e); } catch (IllegalAccessException e) { throw new ExpressionException(e.getMessage(), e); } catch (InvocationTargetException e) { throw new ExpressionException(e.getMessage(), e); } catch (NoSuchMethodException e) { throw new ExpressionException(e.getMessage(), e); } }
3.68
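The treat() helper above builds the subtype path reflectively via getConstructor(PathMetadata.class). A minimal, self-contained sketch of that reflective-construction pattern, with a hypothetical Box class standing in for the generated query type (a modern multi-catch replaces the snippet's four separate catch blocks):

```java
import java.lang.reflect.InvocationTargetException;

public class ReflectiveFactory {
    // Hypothetical target type used only to illustrate the pattern.
    public static class Box {
        final String label;
        public Box(String label) { this.label = label; }
    }

    // Mirrors the shape of treat(): look up a single-argument constructor
    // and wrap all checked reflection failures in one unchecked exception.
    static <U> U newInstance(Class<U> type, String arg) {
        try {
            return type.getConstructor(String.class).newInstance(arg);
        } catch (InstantiationException | IllegalAccessException
                | InvocationTargetException | NoSuchMethodException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }
    }

    public static void main(String[] args) {
        Box box = newInstance(Box.class, "fragile");
        System.out.println(box.label); // fragile
    }
}
```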
AreaShop_RentRegion_getFormattedPrice
/** * Get the formatted string of the price (includes prefix and suffix). * @return The formatted string of the price */ public String getFormattedPrice() { return Utils.formatCurrency(getPrice()); }
3.68
dubbo_UrlUtils_serializationId
/** * Get the serialization id * * @param url url * @return {@link Byte} */ public static Byte serializationId(URL url) { Byte serializationId; // Obtain the value from prefer_serialization. Such as.fastjson2,hessian2 List<String> preferSerials = preferSerialization(url); for (String preferSerial : preferSerials) { if ((serializationId = CodecSupport.getIDByName(preferSerial)) != null) { return serializationId; } } // Secondly, obtain the value from serialization if ((serializationId = CodecSupport.getIDByName(url.getParameter(SERIALIZATION_KEY))) != null) { return serializationId; } // Finally, use the default serialization type return CodecSupport.getIDByName(DefaultSerializationSelector.getDefaultRemotingSerialization()); }
3.68
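serializationId() above resolves an id through a three-step fallback chain. A self-contained sketch of the same lookup shape, with a plain Map standing in for CodecSupport's name-to-id registry (names and ids are illustrative, not Dubbo's real values):

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class FallbackLookup {
    // Illustrative registry standing in for CodecSupport's name -> id mapping.
    static final Map<String, Byte> IDS = new LinkedHashMap<>();
    static {
        IDS.put("hessian2", (byte) 2);
        IDS.put("fastjson2", (byte) 23); // ids here are made up for the demo
    }

    // Same shape as serializationId(URL): preferred names first,
    // then an explicit single setting, then a hard-coded default.
    static Byte resolve(List<String> preferred, String explicit, String fallback) {
        for (String name : preferred) {
            Byte id = IDS.get(name);
            if (id != null) {
                return id;
            }
        }
        Byte id = IDS.get(explicit);
        return id != null ? id : IDS.get(fallback);
    }

    public static void main(String[] args) {
        // "unknown" is skipped, "fastjson2" wins.
        System.out.println(resolve(Arrays.asList("unknown", "fastjson2"), "hessian2", "hessian2"));
    }
}
```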
graphhopper_TarjanSCC_getTotalComponents
/** * The total number of strongly connected components. This always includes single-node components. */ public int getTotalComponents() { return numComponents; }
3.68
hbase_AbstractFSWALProvider_getWALPrefixFromWALName
/** * Get prefix of the log from its name, assuming WAL name in format of * log_prefix.filenumber.log_suffix * @param name Name of the WAL to parse * @return prefix of the log * @throws IllegalArgumentException if the name passed in is not a valid wal file name * @see AbstractFSWAL#getCurrentFileName() */ public static String getWALPrefixFromWALName(String name) { return getWALNameGroupFromWALName(name, 1); }
3.68
pulsar_ManagedLedgerConfig_getMaxUnackedRangesToPersistInMetadataStore
/** * @return the maximum number of unacked message ranges that can be persisted in the metadata store * */ public int getMaxUnackedRangesToPersistInMetadataStore() { return maxUnackedRangesToPersistInMetadataStore; }
3.68
flink_JobVertex_setName
/** * Sets the name of the vertex. * * @param name The new name. */ public void setName(String name) { this.name = name == null ? DEFAULT_NAME : name; }
3.68
morf_Cast_getScale
/** * @return the scale */ public int getScale() { return scale; }
3.68
hadoop_S3APrefetchingInputStream_getPos
/** * Gets the current position. If the underlying S3 input stream is closed, * it returns the last read position from the underlying stream. If the * current position was never read and the underlying input stream is closed, * this would return 0. * * @return the current position. * @throws IOException if there is an IO error during this operation. */ @Override public synchronized long getPos() throws IOException { if (!isClosed()) { lastReadCurrentPos = inputStream.getPos(); } return lastReadCurrentPos; }
3.68
hudi_HoodieTable_getMetadataWriter
/** * Gets the table metadata writer for regular writes. * <p> * Note: * Gets the metadata writer for the conf. If the metadata table doesn't exist, * this will trigger the creation of the table and the initial bootstrapping. * Since this call is under the transaction lock, other concurrent writers * are blocked from doing a similar initial metadata table creation and * bootstrapping. * * @param triggeringInstantTimestamp The instant that is triggering this metadata write * @param failedWritesCleaningPolicy Cleaning policy on failed writes * @return An instance of {@link HoodieTableMetadataWriter}. */ protected Option<HoodieTableMetadataWriter> getMetadataWriter( String triggeringInstantTimestamp, HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy) { // Each engine is expected to override this and // provide the actual metadata writer, if enabled. return Option.empty(); }
3.68
flink_TypeInferenceExtractor_forAsyncTableFunction
/** Extracts a type inference from an {@link AsyncTableFunction}. */ public static TypeInference forAsyncTableFunction( DataTypeFactory typeFactory, Class<? extends AsyncTableFunction<?>> function) { final FunctionMappingExtractor mappingExtractor = new FunctionMappingExtractor( typeFactory, function, UserDefinedFunctionHelper.ASYNC_TABLE_EVAL, createParameterSignatureExtraction(1), null, createGenericResultExtraction(AsyncTableFunction.class, 0, true), createParameterWithArgumentVerification(CompletableFuture.class)); return extractTypeInference(mappingExtractor); }
3.68
rocketmq-connect_MemoryStateManagementServiceImpl_putSafe
/** * Safely set the state of the task to the given value. What is * considered "safe" depends on the implementation, but basically it * means that the store can provide higher assurance that another worker * hasn't concurrently written any conflicting data. * * @param status the status of the task */ @Override public synchronized void putSafe(TaskStatus status) { put(status); }
3.68
framework_VComboBox_getSelectedCaption
/** * This method is meant for internal use and may change in future versions. * * @since 7.7 * @return the caption of selected item, if "scroll to page" is disabled */ public String getSelectedCaption() { return explicitSelectedCaption; }
3.68
dubbo_MeshRuleDispatcher_getListenerMap
/** * For unit testing only */ @Deprecated public Map<String, Set<MeshRuleListener>> getListenerMap() { return listenerMap; }
3.68
framework_LayoutDependencyTree_markAsHorizontallyLayouted
/** * Marks the managed layout as layouted horizontally and propagates the need * of horizontal measuring for any components that might have got their size * changed as a result. If there are blockers, nothing is done. * * @param layout * the managed layout whose horizontal layouting has been done, * should not be {@code null} */ public void markAsHorizontallyLayouted(ManagedLayout layout) { LayoutDependency dependency = getDependency(layout.getConnectorId(), HORIZONTAL); dependency.markAsLayouted(); }
3.68
hbase_JVM_isUnix
/** * Check if the OS is unix. * @return whether this is unix or not. */ public static boolean isUnix() { if (windows) { return false; } return (ibmvendor ? linux : true); }
3.68
hudi_ByteBufferBackedInputStream_copyFrom
/** * Copies at most {@code length} bytes starting from position {@code pos} into the target * buffer with provided {@code offset}. Returns number of bytes copied from the backing buffer * * NOTE: This does not change the current position of the stream and is thread-safe * * @param pos absolute position w/in stream to read from * @param targetBuffer target buffer to copy into * @param offset target buffer offset to copy at * @param length length of the sequence to copy * @return number of bytes copied */ public int copyFrom(long pos, byte[] targetBuffer, int offset, int length) { int bufferPos = bufferOffset + (int) pos; if (bufferPos > buffer.limit()) { throw new IllegalArgumentException( String.format("Can't read past the backing buffer boundary (offset %d, length %d)", pos, buffer.limit() - bufferOffset) ); } else if (length > targetBuffer.length) { throw new IllegalArgumentException( String.format("Target buffer is too small (length %d, buffer size %d)", length, targetBuffer.length) ); } // Determine total number of bytes available to read int available = Math.min(length, buffer.limit() - bufferPos); // Get current buffer position in the backing array System.arraycopy(buffer.array(), bufferPos, targetBuffer, offset, available); return available; }
3.68
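A small, runnable illustration of the positional copy that copyFrom() performs: read an absolute range out of a wrapped ByteBuffer's backing array without moving any stream position (offsets here are made up for the demo):

```java
import java.nio.ByteBuffer;

public class PositionalCopyDemo {
    public static void main(String[] args) {
        // Backing buffer over "hello world"; the stream in the snippet reads
        // between bufferOffset and limit without moving its position.
        byte[] backing = "hello world".getBytes();
        ByteBuffer buffer = ByteBuffer.wrap(backing);

        // Copy 5 bytes starting at absolute position 6 ("world"),
        // mirroring copyFrom(pos, targetBuffer, offset, length).
        byte[] target = new byte[5];
        int bufferPos = 6; // bufferOffset (0 here) + pos
        int available = Math.min(target.length, buffer.limit() - bufferPos);
        System.arraycopy(buffer.array(), bufferPos, target, 0, available);

        System.out.println(new String(target)); // world
        System.out.println(available);          // 5
    }
}
```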
morf_AbstractSqlDialectTest_testDaysBetween
/** * Test that DAYS_BETWEEN functionality behaves as expected */ @Test public void testDaysBetween() { SelectStatement testStatement = select(daysBetween(field("dateOne"), field("dateTwo"))) .from(tableRef("MyTable")); assertEquals(expectedDaysBetween(), testDialect.convertStatementToSQL(testStatement)); }
3.68
framework_VaadinSession_getCsrfToken
/** * Gets the CSRF token (aka double submit cookie) that is used to protect * against Cross Site Request Forgery attacks. * * @since 7.1 * @return the csrf token string */ public String getCsrfToken() { assert hasLock(); return csrfToken; }
3.68
shardingsphere-elasticjob_JobNodeStorage_getJobNodeDataDirectly
/** * Get job node data from registry center directly. * * @param node node * @return data of job node */ public String getJobNodeDataDirectly(final String node) { return regCenter.getDirectly(jobNodePath.getFullPath(node)); }
3.68
hbase_MultiRowRangeFilter_isIterationComplete
/** * Returns true if we exhausted searching all row ranges. */ public boolean isIterationComplete(int index) { return index >= ranges.size(); }
3.68
framework_MultiSelectionEvent_getNewSelection
/** * Gets the new selection. * <p> * The result is the current selection of the source * {@link AbstractMultiSelect} object. So it's always exactly the same as * {@link AbstractMultiSelect#getValue()} * * @see #getValue() * * @return a set of items selected after the selection was changed */ public Set<T> getNewSelection() { return getValue(); }
3.68
graphhopper_HmmProbabilities_transitionLogProbability
/** * Returns the logarithmic transition probability density for the given * transition parameters. * * @param routeLength Length of the shortest route [m] between two * consecutive map matching candidates. * @param linearDistance Linear distance [m] between two consecutive GPS * measurements. */ public double transitionLogProbability(double routeLength, double linearDistance) { // Transition metric taken from Newson & Krumm. double transitionMetric = Math.abs(linearDistance - routeLength); return Distributions.logExponentialDistribution(beta, transitionMetric); }
3.68
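transitionLogProbability() above plugs |linearDistance - routeLength| into an exponential distribution, following Newson & Krumm. A standalone sketch of that computation, assuming the usual exponential log-density log f(x) = -log(beta) - x/beta and an illustrative beta value:

```java
public class TransitionProbDemo {
    // Log pdf of the exponential distribution used by Newson & Krumm:
    // f(x) = (1/beta) * exp(-x/beta), so log f(x) = -log(beta) - x/beta.
    static double logExponential(double beta, double x) {
        return Math.log(1.0 / beta) - (x / beta);
    }

    public static void main(String[] args) {
        double beta = 2.0;             // illustrative; GraphHopper derives its own
        double routeLength = 120.0;    // metres along the road network
        double linearDistance = 100.0; // metres between consecutive GPS points
        // Transition metric: |linearDistance - routeLength|, as in the snippet.
        double metric = Math.abs(linearDistance - routeLength);
        System.out.println(logExponential(beta, metric)); // ~ -10.69
    }
}
```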
framework_Table_enableContentRefreshing
/** * Go to mode where content refreshing has effect. * * @param refreshContent * true if content refresh needs to be done */ protected void enableContentRefreshing(boolean refreshContent) { isContentRefreshesEnabled = true; if (refreshContent) { refreshRenderedCells(); // Ensure that client gets a response markAsDirty(); } }
3.68
hbase_Constraints_addConstraint
/** * Write the raw constraint and configuration to the descriptor. * <p/> * This method takes care of creating a new configuration based on the passed in configuration and * then updating that with enabled and priority of the constraint. * <p/> * When a constraint is added, it is automatically enabled. */ private static TableDescriptorBuilder addConstraint(TableDescriptorBuilder builder, Class<? extends Constraint> clazz, Configuration conf, long priority) throws IOException { return writeConstraint(builder, serializeConstraintClass(clazz), configure(conf, true, priority)); }
3.68
querydsl_StringExpression_endsWith
/** * Create a {@code this.endsWith(str)} expression * * <p>Returns true if this ends with str</p> * * @param str string * @return this.endsWith(str) * @see java.lang.String#endsWith(String) */ public BooleanExpression endsWith(String str) { return endsWith(ConstantImpl.create(str)); }
3.68
framework_DesignContext_getLocalId
/** * Returns the local id of the created component or null if not exist. * * @return the localId */ public String getLocalId() { return localId; }
3.68
flink_PrioritizedOperatorSubtaskState_getJobManagerManagedOperatorState
/** * Returns the managed operator state from the job manager, which represents the ground truth * about what this state should represent. This is the alternative with lowest priority. */ @Nonnull public StateObjectCollection<OperatorStateHandle> getJobManagerManagedOperatorState() { return lastElement(prioritizedManagedOperatorState); }
3.68
flink_WindowValueState_update
/** * Update the state with the given value under current key and the given window. * * @param window the window namespace. * @param value the new value for the state. */ public void update(W window, RowData value) throws IOException { windowState.setCurrentNamespace(window); windowState.update(value); }
3.68
framework_AbstractInMemoryContainer_fireItemAdded
/** * Notify item set change listeners that an item has been added to the * container. * * @since 7.4 * * @param position * position of the added item in the view * @param itemId * id of the added item * @param item * the added item */ protected void fireItemAdded(int position, ITEMIDTYPE itemId, ITEMCLASS item) { fireItemsAdded(position, itemId, 1); }
3.68
framework_AbstractColorPicker_setPopupStyle
/** * The style for the popup window. * * @param style * The style */ public void setPopupStyle(PopupStyle style) { popupStyle = style; switch (style) { case POPUP_NORMAL: { setRGBVisibility(true); setHSVVisibility(true); setSwatchesVisibility(true); setHistoryVisibility(true); setTextfieldVisibility(true); break; } case POPUP_SIMPLE: { setRGBVisibility(false); setHSVVisibility(false); setSwatchesVisibility(true); setHistoryVisibility(false); setTextfieldVisibility(false); break; } } }
3.68
flink_CheckpointConfig_deleteOnCancellation
/** * Returns whether persistent checkpoints shall be discarded on cancellation of the job. * * @return <code>true</code> if persistent checkpoints shall be discarded on cancellation of * the job. */ public boolean deleteOnCancellation() { return this == DELETE_ON_CANCELLATION; }
3.68
dubbo_HttpHeaderUtil_parseRequestHeader
/** * Parse REST request headers into invocation attachments and attributes. * * @param rpcInvocation the invocation to populate * @param requestFacade the request to read headers from */ public static void parseRequestHeader(RpcInvocation rpcInvocation, RequestFacade requestFacade) { Enumeration<String> headerNames = requestFacade.getHeaderNames(); while (headerNames.hasMoreElements()) { String header = headerNames.nextElement(); if (!isRestAttachHeader(header)) { // attribute rpcInvocation.put(header, requestFacade.getHeader(header)); continue; } // attachment rpcInvocation.setAttachment(subRestAttachRealHeaderPrefix(header.trim()), requestFacade.getHeader(header)); } }
3.68
hudi_HoodieBloomIndex_lookupIndex
/** * Lookup the location for each record key and return the pair<record_key,location> for all record keys already * present and drop the record keys if not present. */ private HoodiePairData<HoodieKey, HoodieRecordLocation> lookupIndex( HoodiePairData<String, String> partitionRecordKeyPairs, final HoodieEngineContext context, final HoodieTable hoodieTable) { // Step 1: Obtain records per partition, in the incoming records Map<String, Long> recordsPerPartition = partitionRecordKeyPairs.countByKey(); List<String> affectedPartitionPathList = new ArrayList<>(recordsPerPartition.keySet()); // Step 2: Load all involved files as <Partition, filename> pairs List<Pair<String, BloomIndexFileInfo>> fileInfoList = getBloomIndexFileInfoForPartitions(context, hoodieTable, affectedPartitionPathList); final Map<String, List<BloomIndexFileInfo>> partitionToFileInfo = fileInfoList.stream().collect(groupingBy(Pair::getLeft, mapping(Pair::getRight, toList()))); // Step 3: Obtain a HoodieData, for each incoming record, that already exists, with the file id, // that contains it. HoodiePairData<HoodieFileGroupId, String> fileComparisonPairs = explodeRecordsWithFileComparisons(partitionToFileInfo, partitionRecordKeyPairs); return bloomIndexHelper.findMatchingFilesForRecordKeys(config, context, hoodieTable, partitionRecordKeyPairs, fileComparisonPairs, partitionToFileInfo, recordsPerPartition); }
3.68
hadoop_TimelineMetricCalculator_sub
/** * Subtract operation between two Numbers. * @param n1 Number n1 * @param n2 Number n2 * @return a Number representing (n1 - n2). */ public static Number sub(Number n1, Number n2) { if (n1 == null) { throw new YarnRuntimeException( "Number to be subtracted shouldn't be null."); } else if (n2 == null) { return n1; } if (n1 instanceof Integer || n1 instanceof Long) { return n1.longValue() - n2.longValue(); } if (n1 instanceof Float || n1 instanceof Double) { return n1.doubleValue() - n2.doubleValue(); } // TODO throw warnings/exceptions for other types of number. return null; }
3.68
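To make sub()'s type-dependent contract concrete, here is a runnable re-implementation demo; the only change from the snippet is swapping YarnRuntimeException for a plain IllegalArgumentException so it compiles without Hadoop:

```java
public class SubDemo {
    public static void main(String[] args) {
        System.out.println(sub(10, 3));      // 7 (as a Long)
        System.out.println(sub(10.5, 3.0));  // 7.5 (as a Double)
        System.out.println(sub(10, null));   // 10 (null n2 returns n1 unchanged)
        System.out.println(sub((short) 1, (short) 1)); // null: shorts unhandled
    }

    // Same logic as the snippet: integral inputs subtract as long,
    // floating-point as double, unsupported Number types yield null.
    static Number sub(Number n1, Number n2) {
        if (n1 == null) {
            throw new IllegalArgumentException("Number to be subtracted shouldn't be null.");
        } else if (n2 == null) {
            return n1;
        }
        if (n1 instanceof Integer || n1 instanceof Long) {
            return n1.longValue() - n2.longValue();
        }
        if (n1 instanceof Float || n1 instanceof Double) {
            return n1.doubleValue() - n2.doubleValue();
        }
        return null;
    }
}
```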
hbase_StoreFileReader_passesGeneralRowColBloomFilter
/** * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a * multi-column query. * @param cell the cell to check if present in BloomFilter * @return True if passes */ public boolean passesGeneralRowColBloomFilter(Cell cell) { BloomFilter bloomFilter = this.generalBloomFilter; if (bloomFilter == null) { bloomFilterMetrics.incrementEligible(); return true; } // Used in ROW_COL bloom Cell kvKey = null; // If the incoming key is already a fake rowcol key then use it as it is if (cell.getTypeByte() == KeyValue.Type.Maximum.getCode() && cell.getFamilyLength() == 0) { kvKey = cell; } else { kvKey = PrivateCellUtil.createFirstOnRowCol(cell); } return checkGeneralBloomFilter(null, kvKey, bloomFilter); }
3.68
pulsar_ConfigUtils_getConfigValueAsString
/** * Get configured property as a string. If not configured, return the default value. * @param conf - the configuration map * @param configProp - the property to get * @param defaultValue - the value to use if the configuration value is not set * @return a string from the conf or the default value */ static String getConfigValueAsString(ServiceConfiguration conf, String configProp, String defaultValue) throws IllegalArgumentException { String value = getConfigValueAsStringImpl(conf, configProp); if (value == null) { value = defaultValue; } log.info("Configuration for [{}] is [{}]", configProp, value); return value; }
3.68
hudi_BulkInsertPartitioner_tryPrependPartitionPathColumns
/* * If possible, we want to sort the data by partition path. Doing so will reduce the number of files written. * This will not change the desired sort order, it is just a performance improvement. **/ static String[] tryPrependPartitionPathColumns(String[] columnNames, HoodieWriteConfig config) { String partitionPath; if (config.populateMetaFields()) { partitionPath = HoodieRecord.HoodieMetadataField.PARTITION_PATH_METADATA_FIELD.getFieldName(); } else { partitionPath = config.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key()); } if (isNullOrEmpty(partitionPath)) { return columnNames; } Set<String> sortCols = new LinkedHashSet<>(StringUtils.split(partitionPath, ",")); sortCols.addAll(Arrays.asList(columnNames)); return sortCols.toArray(new String[0]); }
3.68
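The LinkedHashSet trick in tryPrependPartitionPathColumns() both prepends and deduplicates. A runnable demo of just that behavior, using String.split in place of Hudi's StringUtils.split and made-up column names:

```java
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class PrependSortColumnsDemo {
    public static void main(String[] args) {
        // Mirrors tryPrependPartitionPathColumns: the partition path columns go
        // first, and LinkedHashSet drops any duplicates from the original list
        // while preserving the remaining order.
        String[] columnNames = {"rider", "partition", "ts"};
        String partitionPath = "partition";

        Set<String> sortCols = new LinkedHashSet<>(Arrays.asList(partitionPath.split(",")));
        sortCols.addAll(Arrays.asList(columnNames));

        System.out.println(sortCols); // [partition, rider, ts]
    }
}
```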
hbase_Reference_read
/** * Read a Reference from FileSystem. * @return New Reference made from passed <code>p</code> */ public static Reference read(final FileSystem fs, final Path p) throws IOException { InputStream in = fs.open(p); try { // I need to be able to move back in the stream if this is not a pb serialization so I can // do the Writable decoding instead. in = in.markSupported() ? in : new BufferedInputStream(in); int pblen = ProtobufUtil.lengthOfPBMagic(); in.mark(pblen); byte[] pbuf = new byte[pblen]; IOUtils.readFully(in, pbuf, 0, pblen); // WATCHOUT! Return in middle of function!!! if (ProtobufUtil.isPBMagicPrefix(pbuf)) return convert(FSProtos.Reference.parseFrom(in)); // Else presume Writables. Need to reset the stream since it didn't start w/ pb. // We won't bother rewriting the Reference as a pb since Reference is transitory. in.reset(); Reference r = new Reference(); DataInputStream dis = new DataInputStream(in); // Set in = dis so it gets the close below in the finally on our way out. in = dis; r.readFields(dis); return r; } finally { in.close(); } }
3.68
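Reference.read() above sniffs a magic prefix with mark()/reset() to choose between two decodings. A self-contained sketch of that pattern, using HBase's 4-byte "PBUF" magic for illustration:

```java
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public class MagicSniffDemo {
    public static void main(String[] args) throws IOException {
        byte[] pbMagic = {'P', 'B', 'U', 'F'}; // HBase's pb magic prefix
        byte[] data = {'P', 'B', 'U', 'F', 1, 2, 3};

        InputStream in = new ByteArrayInputStream(data);
        // Same pattern as Reference.read(): guarantee mark support, peek at the
        // first bytes, and reset if they are not the protobuf magic prefix.
        in = in.markSupported() ? in : new BufferedInputStream(in);
        in.mark(pbMagic.length);
        byte[] head = new byte[pbMagic.length];
        int read = in.read(head, 0, head.length);

        if (read == pbMagic.length && Arrays.equals(head, pbMagic)) {
            System.out.println("protobuf-serialized; keep reading from here");
        } else {
            in.reset(); // fall back to the legacy (Writable) decoding path
            System.out.println("legacy format; stream rewound");
        }
        in.close();
    }
}
```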
pulsar_LocalBrokerData_update
/** * Using another LocalBrokerData, update this. * * @param other * LocalBrokerData to update from. */ public void update(final LocalBrokerData other) { updateSystemResourceUsage(other.cpu, other.memory, other.directMemory, other.bandwidthIn, other.bandwidthOut); updateBundleData(other.lastStats); lastStats = other.lastStats; }
3.68
hadoop_BlockStorageMovementAttemptedItems_stop
/** * Sets running flag to false. Also, this will interrupt monitor thread and * clear all the queued up tasks. */ public synchronized void stop() { monitorRunning = false; if (timerThread != null) { timerThread.interrupt(); } this.clearQueues(); }
3.68
flink_MetricFetcherImpl_queryMetrics
/** * Query the metrics from the given QueryServiceGateway. * * @param queryServiceGateway to query for metrics */ private CompletableFuture<Void> queryMetrics( final MetricQueryServiceGateway queryServiceGateway) { LOG.debug("Query metrics for {}.", queryServiceGateway.getAddress()); return queryServiceGateway .queryMetrics(timeout) .thenComposeAsync( (MetricDumpSerialization.MetricSerializationResult result) -> { metrics.addAll(deserializer.deserialize(result)); return FutureUtils.completedVoidFuture(); }, executor); }
3.68
MagicPlugin_MagicConfigCommandExecutor_setSession
/** * Note that this gets called asynchronously */ protected void setSession(CommandSender sender, String session) { final Plugin plugin = magic.getPlugin(); plugin.getServer().getScheduler().runTask(plugin, new Runnable() { @Override public void run() { Mage mage = controller.getMage(sender); sessions.put(mage.getId(), session); } }); }
3.68
flink_JMXService_getInstance
/** Acquire the global singleton JMXServer instance. */ public static Optional<JMXServer> getInstance() { return Optional.ofNullable(jmxServer); }
3.68
querydsl_ProjectableSQLQuery_union
/** * Creates a union expression for the given subqueries * * @param <RT> * @param alias alias for union * @param sq subqueries * @return the current object */ @SuppressWarnings("unchecked") public <RT> Q union(Path<?> alias, SubQueryExpression<RT>... sq) { return from((Expression) UnionUtils.union(Arrays.asList(sq), (Path) alias, false)); }
3.68
flink_SecurityFactoryServiceLoader_findContextFactory
/** Find a suitable {@link SecurityContextFactory} based on canonical name. */ public static SecurityContextFactory findContextFactory(String securityContextFactoryClass) throws NoMatchSecurityFactoryException { return findFactoryInternal( securityContextFactoryClass, SecurityContextFactory.class, SecurityContextFactory.class.getClassLoader()); }
3.68
dubbo_ChannelBuffers_prefixEquals
// Returns true if the first "count" readable bytes of the two buffers are equal. public static boolean prefixEquals(ChannelBuffer bufferA, ChannelBuffer bufferB, int count) { final int aLen = bufferA.readableBytes(); final int bLen = bufferB.readableBytes(); if (aLen < count || bLen < count) { return false; } int aIndex = bufferA.readerIndex(); int bIndex = bufferB.readerIndex(); for (int i = count; i > 0; i--) { if (bufferA.getByte(aIndex) != bufferB.getByte(bIndex)) { return false; } aIndex++; bIndex++; } return true; }
3.68
hudi_Table_addAllRows
/** * Add all rows. * * @param rows Rows to be added * @return this Table, for call chaining */ public Table addAllRows(List<Comparable[]> rows) { rows.forEach(r -> add(Arrays.asList(r))); return this; }
3.68
morf_Function_min
/** * Helper method to create an instance of the "minimum" SQL function. * * @param fieldToEvaluate the field to evaluate in the minimum function. This can be any expression resulting in a single column of data. * @return an instance of the minimum function */ public static Function min(AliasedField fieldToEvaluate) { return new Function(FunctionType.MIN, fieldToEvaluate); }
3.68
hadoop_HsController_logs
/** * Render the logs page. */ public void logs() { String logEntity = $(ENTITY_STRING); JobID jid = null; try { jid = JobID.forName(logEntity); set(JOB_ID, logEntity); requireJob(); } catch (Exception e) { // fall below } if (jid == null) { try { TaskAttemptID taskAttemptId = TaskAttemptID.forName(logEntity); set(TASK_ID, taskAttemptId.getTaskID().toString()); set(JOB_ID, taskAttemptId.getJobID().toString()); requireTask(); requireJob(); } catch (Exception e) { // fall below } } render(HsLogsPage.class); }
3.68
flink_StreamGraphUtils_validateTransformationUid
/** * Throw {@link IllegalStateException} if the {@link PhysicalTransformation}'s uid or hash is * not set when auto generate uid is disabled. * * @param streamGraph The given graph that the transformation is added to * @param transformation The transformation needed to validate */ public static void validateTransformationUid( StreamGraph streamGraph, Transformation<?> transformation) { if (!streamGraph.getExecutionConfig().hasAutoGeneratedUIDsEnabled()) { if (transformation instanceof PhysicalTransformation && transformation.getUserProvidedNodeHash() == null && transformation.getUid() == null) { throw new IllegalStateException( "Auto generated UIDs have been disabled " + "but no UID or hash has been assigned to operator " + transformation.getName()); } } }
3.68
hbase_SaslClientAuthenticationProviders_addExplicitProviders
/** * Extracts and instantiates authentication providers from the configuration. */ static void addExplicitProviders(Configuration conf, HashMap<Byte, SaslClientAuthenticationProvider> providers) { for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class<?> clz; // Load the class from the config try { clz = Class.forName(implName); } catch (ClassNotFoundException e) { LOG.warn("Failed to load SaslClientAuthenticationProvider {}", implName, e); continue; } // Make sure it's the right type if (!SaslClientAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Ignoring SaslClientAuthenticationProvider {} because it is not an instance of" + " SaslClientAuthenticationProvider", clz); continue; } // Instantiate it SaslClientAuthenticationProvider provider; try { provider = (SaslClientAuthenticationProvider) clz.getConstructor().newInstance(); } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) { LOG.warn("Failed to instantiate SaslClientAuthenticationProvider {}", clz, e); continue; } // Add it to our set, only if it doesn't conflict with something else we've // already registered. addProviderIfNotExists(provider, providers); } }
3.68
hadoop_ComponentContainers_name
/** * Name of the service component. **/ public ComponentContainers name(String name) { this.componentName = name; return this; }
3.68
dubbo_HealthStatusManager_clearStatus
/** * Clears the health status record of a service. The health service will respond with NOT_FOUND * error on checking the status of a cleared service. * * @param service the name of some aspect of the server that is associated with a health status. * This name can have no relation with the gRPC services that the server is * running with. It can also be an empty String {@code ""} per the gRPC * specification. */ public void clearStatus(String service) { healthService.clearStatus(service); }
3.68
pulsar_MetadataStoreExtended_handleMetadataEvent
/** * Handles a metadata synchronizer event. * * @param event * @return completed future when the event is handled */ default CompletableFuture<Void> handleMetadataEvent(MetadataEvent event) { return CompletableFuture.completedFuture(null); }
3.68
pulsar_AuthenticationProviderOpenID_verifyIssuerAndGetJwk
/** * Verify the JWT's issuer (iss) claim is one of the allowed issuers and then retrieve the JWK from the issuer. If * not, see {@link FallbackDiscoveryMode} for the fallback behavior. * @param jwt - the token to use to discover the issuer's JWKS URI, which is then used to retrieve the issuer's * current public keys. * @return a JWK that can be used to verify the JWT's signature */ private CompletableFuture<Jwk> verifyIssuerAndGetJwk(DecodedJWT jwt) { if (jwt.getIssuer() == null) { incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER); return CompletableFuture.failedFuture(new AuthenticationException("Issuer cannot be null")); } else if (this.issuers.contains(jwt.getIssuer())) { // Retrieve the metadata: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata return openIDProviderMetadataCache.getOpenIDProviderMetadataForIssuer(jwt.getIssuer()) .thenCompose(metadata -> jwksCache.getJwk(metadata.getJwksUri(), jwt.getKeyId())); } else if (fallbackDiscoveryMode == FallbackDiscoveryMode.KUBERNETES_DISCOVER_TRUSTED_ISSUER) { return openIDProviderMetadataCache.getOpenIDProviderMetadataForKubernetesApiServer(jwt.getIssuer()) .thenCompose(metadata -> openIDProviderMetadataCache.getOpenIDProviderMetadataForIssuer(metadata.getIssuer())) .thenCompose(metadata -> jwksCache.getJwk(metadata.getJwksUri(), jwt.getKeyId())); } else if (fallbackDiscoveryMode == FallbackDiscoveryMode.KUBERNETES_DISCOVER_PUBLIC_KEYS) { return openIDProviderMetadataCache.getOpenIDProviderMetadataForKubernetesApiServer(jwt.getIssuer()) .thenCompose(__ -> jwksCache.getJwkFromKubernetesApiServer(jwt.getKeyId())); } else { incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER); return CompletableFuture .failedFuture(new AuthenticationException("Issuer not allowed: " + jwt.getIssuer())); } }
3.68
framework_GridDropTarget_getDropMode
/** * Gets the drop mode of this drop target. * * @return Drop mode that describes the allowed drop locations within the * Grid's row. */ public DropMode getDropMode() { return getState(false).dropMode; }
3.68
flink_SqlFunctionUtils_struncate
/** SQL <code>TRUNCATE</code> operator applied to double values. */ public static double struncate(double b0) { return struncate(b0, 0); }
3.68
hadoop_TypedBytesOutput_writeByte
/** * Writes a byte as a typed bytes sequence. * * @param b the byte to be written * @throws IOException */ public void writeByte(byte b) throws IOException { out.write(Type.BYTE.code); out.write(b); }
3.68
dubbo_ServiceInstancesChangedListener_notifyAddressChanged
/** * race condition is protected by onEvent/doOnEvent */ protected void notifyAddressChanged() { MetricsEventBus.post(RegistryEvent.toNotifyEvent(applicationModel), () -> { Map<String, Integer> lastNumMap = new HashMap<>(); // 1 different services listeners.forEach((serviceKey, listenerSet) -> { // 2 multiple subscription listener of the same service for (NotifyListenerWithKey listenerWithKey : listenerSet) { NotifyListener notifyListener = listenerWithKey.getNotifyListener(); List<URL> urls = toUrlsWithEmpty( getAddresses(listenerWithKey.getProtocolServiceKey(), notifyListener.getConsumerUrl())); logger.info( "Notify service " + listenerWithKey.getProtocolServiceKey() + " with urls " + urls.size()); notifyListener.notify(urls); lastNumMap.put(serviceKey, urls.size()); } }); return lastNumMap; }); }
3.68
morf_HumanReadableStatementHelper_generateSelectStatementString
/** * Generates a string for a select, or sub-select, statement. The "select" prefixing keyword is optional; * depending on the context it is being used the result may be more readable if it is omitted. * * @param statement the select statement to describe. * @param prefix {@code true} to include a leading "select ", {@code false} otherwise. * @return the string. */ @SuppressWarnings("rawtypes") private static String generateSelectStatementString(final AbstractSelectStatement<?> statement, final boolean prefix) { final StringBuilder sb = new StringBuilder(); if (prefix) { sb.append("select"); } if ((AbstractSelectStatement)statement instanceof SelectFirstStatement) { if (sb.length() > 0) { sb.append(' '); } sb.append("first"); } boolean comma = false; for (AliasedField field : statement.getFields()) { if (comma) { sb.append(", "); } else { comma = true; if (sb.length() > 0) { sb.append(' '); } } sb.append(generateFieldSymbolString(field)); } sb.append(generateFromAndWhereClause(statement, true)); sb.append(generateOrderByClause(statement.getOrderBys())); return sb.toString(); }
3.68
hadoop_SQLDelegationTokenSecretManager_storeDelegationKey
/** * Persists a DelegationKey into the SQL database. The delegation keyId * is expected to be unique and any duplicate key attempts will result * in an IOException. * @param key DelegationKey to persist into the SQL database. */ @Override protected void storeDelegationKey(DelegationKey key) throws IOException { try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(bos)) { key.write(dos); // Add delegation key to SQL database insertDelegationKey(key.getKeyId(), bos.toByteArray()); // Add delegation key to local cache super.storeDelegationKey(key); } catch (SQLException e) { throw new IOException("Failed to store delegation key in SQL secret manager", e); } }
3.68
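storeDelegationKey() serializes the key through a DataOutputStream over an in-memory buffer before handing the bytes to SQL. A runnable sketch of that serialize-to-byte[] pattern, with stand-in fields replacing the Hadoop DelegationKey:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SerializeToBytesDemo {
    public static void main(String[] args) throws IOException {
        // Same serialization pattern as storeDelegationKey(): write through a
        // DataOutputStream into an in-memory buffer, then hand the byte[] off
        // (to SQL in the snippet; just measured here).
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
             DataOutputStream dos = new DataOutputStream(bos)) {
            dos.writeInt(42);            // stand-in for key.getKeyId()
            dos.writeUTF("secret-key");  // stand-in for the key material
            dos.flush();
            byte[] serialized = bos.toByteArray();
            System.out.println(serialized.length + " bytes to persist");
        }
    }
}
```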
flink_AsyncWaitOperator_addToWorkQueue
/** * Add the given stream element to the operator's stream element queue. This operation blocks * until the element has been added. * * <p>Between two insertion attempts, this method yields the execution to the mailbox, such that * events as well as asynchronous results can be processed. * * @param streamElement to add to the operator's queue * @throws InterruptedException if the current thread has been interrupted while yielding to * mailbox * @return a handle that allows to set the result of the async computation for the given * element. */ private ResultFuture<OUT> addToWorkQueue(StreamElement streamElement) throws InterruptedException { Optional<ResultFuture<OUT>> queueEntry; while (!(queueEntry = queue.tryPut(streamElement)).isPresent()) { mailboxExecutor.yield(); } return queueEntry.get(); }
3.68
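addToWorkQueue() above combines a non-blocking tryPut with mailbox yielding instead of a blocking put. A simplified, runnable analogue where a bounded ArrayBlockingQueue stands in for the stream element queue and "yielding" just drains one element (the real MailboxExecutor#yield() processes a pending mailbox action instead):

```java
import java.util.Optional;
import java.util.concurrent.ArrayBlockingQueue;

public class YieldingPutDemo {
    // Stand-in for the operator's bounded stream element queue.
    static final ArrayBlockingQueue<String> QUEUE = new ArrayBlockingQueue<>(2);

    // Stand-in for MailboxExecutor#yield(): make progress elsewhere so the
    // queue can drain; here it simply removes one element.
    static void yieldToMailbox() {
        QUEUE.poll();
    }

    // Same shape as addToWorkQueue(): keep trying a non-blocking insert and
    // yield between attempts instead of blocking the thread outright.
    static void addToWorkQueue(String element) {
        Optional<String> entry;
        while (!(entry = tryPut(element)).isPresent()) {
            yieldToMailbox();
        }
    }

    static Optional<String> tryPut(String element) {
        return QUEUE.offer(element) ? Optional.of(element) : Optional.empty();
    }

    public static void main(String[] args) {
        for (int i = 0; i < 5; i++) {
            addToWorkQueue("element-" + i);
        }
        System.out.println(QUEUE); // [element-3, element-4]
    }
}
```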
graphhopper_InstructionsOutgoingEdges_getSpeed
/** * Returns the tagged maxspeed if available; if not, we use the average speed. * TODO: Should we rely only on the tagged maxspeed? */ private double getSpeed(EdgeIteratorState edge) { double maxSpeed = edge.get(maxSpeedEnc); if (Double.isInfinite(maxSpeed)) return edge.getDistance() / weighting.calcEdgeMillis(edge, false) * 3600; return maxSpeed; }
3.68
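The fallback branch of getSpeed() converts distance over travel time into km/h; a one-screen check of the unit arithmetic (numbers are made up):

```java
public class SpeedFallbackDemo {
    public static void main(String[] args) {
        // Fallback branch of getSpeed(): average speed from distance and travel
        // time. distance[m] / time[ms] * 3600 yields km/h,
        // because 1 m/ms = 1000 m/s = 3600 km/h.
        double distanceMeters = 500;
        double travelMillis = 36_000; // 36 s
        double speedKmh = distanceMeters / travelMillis * 3600;
        System.out.println(speedKmh + " km/h"); // 50.0 km/h
    }
}
```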
hbase_HFileBlock_touch
/** * Calling this method in strategic locations where HFileBlocks are referenced may help diagnose * potential buffer leaks. We pass the block itself as a default hint, but one can use * {@link #touch(Object)} to pass their own hint as well. */ @Override public HFileBlock touch() { return touch(this); }
3.68
AreaShop_RegionSign_getMaterial
/** * Get the material of the sign as saved in the config. * @return Material of the sign, usually {@link Material#WALL_SIGN}, {@link Material#SIGN}, or one of the other wood types (different result for 1.13-), Material.AIR if none. */ public Material getMaterial() { String name = getRegion().getConfig().getString("general.signs." + key + ".signType"); Material result = Materials.signNameToMaterial(name); return result == null ? Material.AIR : result; }
3.68
hudi_DagScheduler_executeNode
/** * Execute the given node. * * @param node The node to be executed */ protected void executeNode(DagNode node, int curRound) { if (node.isCompleted()) { throw new RuntimeException("DagNode already completed! Cannot re-execute"); } try { int repeatCount = node.getConfig().getRepeatCount(); while (repeatCount > 0) { node.execute(executionContext, curRound); log.info("Finished executing {}", node.getName()); repeatCount--; } node.setCompleted(true); } catch (Exception e) { log.error("Exception executing node", e); throw new HoodieException(e); } }
3.68
hbase_ReplicationPeerConfigUtil_toByteArray
/** * Returns a serialized protobuf of <code>peerConfig</code> with the pb magic prefix prepended, suitable * for use as the content of this.peersZNode; i.e. the content of the PEER_ID znode under * /hbase/replication/peers/PEER_ID */ public static byte[] toByteArray(final ReplicationPeerConfig peerConfig) { byte[] bytes = convert(peerConfig).toByteArray(); return ProtobufUtil.prependPBMagic(bytes); }
3.68
pulsar_SecurityUtility_getBCProviderFromClassPath
/** * Get Bouncy Castle provider from classpath, and call Security.addProvider. * Throw Exception if failed. */ public static Provider getBCProviderFromClassPath() throws Exception { Class clazz; try { // prefer non FIPS, for backward compatibility concern. clazz = Class.forName(BC_NON_FIPS_PROVIDER_CLASS); } catch (ClassNotFoundException cnf) { log.warn("Not able to get Bouncy Castle provider: {}, try to get FIPS provider {}", BC_NON_FIPS_PROVIDER_CLASS, BC_FIPS_PROVIDER_CLASS); // attempt to use the FIPS provider. clazz = Class.forName(BC_FIPS_PROVIDER_CLASS); } Provider provider = (Provider) clazz.getDeclaredConstructor().newInstance(); Security.addProvider(provider); if (log.isDebugEnabled()) { log.debug("Found and Instantiated Bouncy Castle provider in classpath {}", provider.getName()); } return provider; }
3.68
graphhopper_LMApproximator_initCollections
// We only expect a very short search @Override protected void initCollections(int size) { super.initCollections(2); }
3.68
flink_RpcEndpoint_runAsync
/** * Execute the runnable in the main thread of the underlying RPC endpoint. * * @param runnable Runnable to be executed in the main thread of the underlying RPC endpoint */ protected void runAsync(Runnable runnable) { rpcServer.runAsync(runnable); }
3.68
flink_SkipListUtils_getNextKeyPointer
/** * Returns the next key pointer on level 0. * * @param memorySegment memory segment for key space. * @param offset offset of key space in the memory segment. */ public static long getNextKeyPointer(MemorySegment memorySegment, int offset) { return memorySegment.getLong(offset + NEXT_KEY_POINTER_OFFSET); }
3.68
morf_AbstractSqlDialectTest_testSelectNotWhereScript
/** * Tests a select with a "not" where clause. */ @Test public void testSelectNotWhereScript() { SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE)) .where(not( eq(new FieldReference(STRING_FIELD), "A0001") )); String value = varCharCast("'A0001'"); String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (NOT (stringField = " + stringLiteralPrefix() + value+"))"; assertEquals("Select using a where not clause", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
hadoop_DiskBalancerDataNode_getDataNodeUUID
/** * Returns datanode ID. **/ public String getDataNodeUUID() { return dataNodeUUID; }
3.68
hadoop_DistributedCache_getFileStatus
/** * Returns {@link FileStatus} of a given cache file on hdfs. Internal to * MapReduce. * @param conf configuration * @param cache cache file * @return <code>FileStatus</code> of a given cache file on hdfs * @throws IOException */ @Deprecated public static FileStatus getFileStatus(Configuration conf, URI cache) throws IOException { FileSystem fileSystem = FileSystem.get(cache, conf); return fileSystem.getFileStatus(new Path(cache.getPath())); }
3.68
pulsar_ProxyExtensionsUtils_getProxyExtensionDefinition
/** * Retrieve the extension definition from the provided handler nar package. * * @param narPath the path to the extension NAR package * @return the extension definition * @throws IOException when fail to load the extension or get the definition */ public static ProxyExtensionDefinition getProxyExtensionDefinition(String narPath, String narExtractionDirectory) throws IOException { try (NarClassLoader ncl = NarClassLoaderBuilder.builder() .narFile(new File(narPath)) .extractionDirectory(narExtractionDirectory) .build();) { return getProxyExtensionDefinition(ncl); } }
3.68
morf_SchemaChangeSequence_visit
/** * @see org.alfasoftware.morf.upgrade.SchemaChangeVisitor#visit(org.alfasoftware.morf.upgrade.AddTableFrom) */ @Override public void visit(AddTableFrom addTableFrom) { changes.add(addTableFrom); }
3.68
hadoop_S3ClientFactory_getMinimumPartSize
/** * Get the minimum part size for transfer parts. * @return part size */ public long getMinimumPartSize() { return minimumPartSize; }
3.68
druid_StatementProxyImpl_getUpdateCount
// bug fixed for oracle @Override public int getUpdateCount() throws SQLException { if (updateCount == null) { FilterChainImpl chain = createChain(); updateCount = chain.statement_getUpdateCount(this); recycleFilterChain(chain); } return updateCount; }
3.68
hudi_CompactionAdminClient_repairCompaction
/** * Renames delta files to make file-slices consistent with the timeline as dictated by Hoodie metadata. Use when * compaction unschedule fails partially. * * This operation MUST be executed with compactions and writer turned OFF. * * @param compactionInstant Compaction Instant to be repaired * @param dryRun Dry Run Mode */ public List<RenameOpResult> repairCompaction(String compactionInstant, int parallelism, boolean dryRun) throws Exception { HoodieTableMetaClient metaClient = createMetaClient(false); validateCompactionPlan(metaClient, compactionInstant, parallelism); return new ArrayList<>(); }
3.68
hbase_RegionServerSpaceQuotaManager_enforceViolationPolicy
/** * Enforces the given violationPolicy on the given table in this RegionServer. */ public void enforceViolationPolicy(TableName tableName, SpaceQuotaSnapshot snapshot) { SpaceQuotaStatus status = snapshot.getQuotaStatus(); if (!status.isInViolation()) { throw new IllegalStateException( tableName + " is not in violation. Violation policy should not be enabled."); } if (LOG.isTraceEnabled()) { LOG.trace("Enabling violation policy enforcement on " + tableName + " with policy " + status.getPolicy()); } // Construct this outside of the lock final SpaceViolationPolicyEnforcement enforcement = getFactory().create(getRegionServerServices(), tableName, snapshot); // "Enables" the policy // HBASE-XXXX: Should this synchronize on the actual table name instead of the map? That would // allow policy enable/disable on different tables to happen concurrently. As written now, only // one table will be allowed to transition at a time. This is probably OK, but not sure if // it would become a bottleneck at large clusters/number of tables. synchronized (enforcedPolicies) { try { enforcement.enable(); } catch (IOException e) { LOG.error("Failed to enable space violation policy for " + tableName + ". This table will not enter violation.", e); return; } enforcedPolicies.put(tableName, enforcement); } }
3.68
hudi_QuickstartUtils_generateRandomValue
/** * Generates a new avro record of the above schema format, retaining the key if optionally provided. The * riderDriverSuffix string is a random String to simulate updates by changing the rider driver fields for records * belonging to the same commit. It is purely used for demo purposes. In the real world, the actual updates are assumed * to be provided based on the application requirements. */ public static OverwriteWithLatestAvroPayload generateRandomValue(HoodieKey key, String riderDriverSuffix) throws IOException { // The timestamp generated is limited to range from 7 days before to now, to avoid generating too many // partitionPaths when users use timestamp as the partitionPath field. GenericRecord rec = generateGenericRecord(key.getRecordKey(), "rider-" + riderDriverSuffix, "driver-" + riderDriverSuffix, generateRangeRandomTimestamp(7)); return new OverwriteWithLatestAvroPayload(Option.of(rec)); }
3.68
hbase_MultiByteBuff_getItemIndexFromCurItemIndex
/* * Returns in which sub ByteBuffer, the given element index will be available. In this case we are * sure that the item will be after MBB's current position */ private int getItemIndexFromCurItemIndex(int elemIndex) { int index = this.curItemIndex; while (elemIndex >= this.itemBeginPos[index]) { index++; if (index == this.itemBeginPos.length) { throw new IndexOutOfBoundsException(); } } return index - 1; }
3.68
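A runnable walkthrough of the search in getItemIndexFromCurItemIndex(): given the absolute begin offsets of each sub-buffer, advance until the offset passes the element index, then step back one (offsets invented for the demo):

```java
public class SegmentLookupDemo {
    public static void main(String[] args) {
        // itemBeginPos[i] is the absolute start offset of sub-buffer i, as in
        // MultiByteBuff; the loop finds the sub-buffer holding elemIndex.
        int[] itemBeginPos = {0, 4, 10, 16}; // three segments plus end offset
        System.out.println(findItem(itemBeginPos, 1, 5));  // 1 (second segment)
        System.out.println(findItem(itemBeginPos, 1, 12)); // 2 (third segment)
    }

    // Mirrors getItemIndexFromCurItemIndex: walk forward from curItemIndex
    // until the begin position passes elemIndex, then step back one.
    static int findItem(int[] itemBeginPos, int curItemIndex, int elemIndex) {
        int index = curItemIndex;
        while (elemIndex >= itemBeginPos[index]) {
            index++;
            if (index == itemBeginPos.length) {
                throw new IndexOutOfBoundsException();
            }
        }
        return index - 1;
    }
}
```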
framework_Tree_isHtmlContentAllowed
/** * Checks whether captions are interpreted as html or plain text. * * @since 7.6 * @return <code>true</code> if the captions are displayed as html, * <code>false</code> if displayed as plain text * @see #setHtmlContentAllowed(boolean) */ public boolean isHtmlContentAllowed() { return htmlContentAllowed; }
3.68
hadoop_RpcServerException_getRpcStatusProto
/** * @return get the rpc status corresponding to this exception. */ public RpcStatusProto getRpcStatusProto() { return RpcStatusProto.ERROR; }
3.68
flink_ProjectOperator_projectTuple3
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public <T0, T1, T2> ProjectOperator<T, Tuple3<T0, T1, T2>> projectTuple3() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes); return new ProjectOperator<T, Tuple3<T0, T1, T2>>(this.ds, this.fieldIndexes, tType); }
3.68
hbase_WALEdit_isMetaEdit
/** * @return True if this is a meta edit; has one edit only and its columnfamily is * {@link #METAFAMILY}. */ public boolean isMetaEdit() { return this.families != null && this.families.size() == 1 && this.families.contains(METAFAMILY); }
3.68
flink_BeamPythonFunctionRunner_createPythonExecutionEnvironment
/** * Creates a specification which specifies the portability Python execution environment. It's * used by Beam's portability framework to create the actual Python execution environment. */ private RunnerApi.Environment createPythonExecutionEnvironment( ReadableConfig config, long memoryLimitBytes) throws Exception { PythonEnvironment environment = environmentManager.createEnvironment(); if (environment instanceof ProcessPythonEnvironment) { ProcessPythonEnvironment processEnvironment = (ProcessPythonEnvironment) environment; Map<String, String> env = processEnvironment.getEnv(); config.getOptional(PythonOptions.PYTHON_JOB_OPTIONS).ifPresent(env::putAll); env.put(PYTHON_WORKER_MEMORY_LIMIT, String.valueOf(memoryLimitBytes)); return Environments.createProcessEnvironment( "", "", processEnvironment.getCommand(), env); } throw new RuntimeException("Currently only ProcessPythonEnvironment is supported."); }
3.68
flink_AbstractStreamOperator_processLatencyMarker
// ------- One input stream public void processLatencyMarker(LatencyMarker latencyMarker) throws Exception { reportOrForwardLatencyMarker(latencyMarker); }
3.68
framework_MouseEvents_isCtrlKey
/** * Checks if the Ctrl key was down when the mouse event took place. * * @return true if Ctrl was pressed when the event occurred, false * otherwise */ public boolean isCtrlKey() { return details.isCtrlKey(); }
3.68
morf_RecordHelper_recordValueToJavaType
/** * Take a string value retrieved from a {@link Record} and convert it to a java value of the specified * type. * * @param stringValue The value retrieved from a {@link Record}. * @param type The Java class to use for the result. * @param <T> The Java type corresponding to the supplied Class * @return The typed java value. */ @SuppressWarnings("unchecked") public static <T> T recordValueToJavaType(String stringValue, Class<T> type) { if (type == Integer.class) { return (T)Integer.valueOf(stringValue); } else if (type == Long.class) { return (T)Long.valueOf(stringValue); } else if (type == Boolean.class) { return (T)Boolean.valueOf(stringValue); } else if (type == LocalDate.class) { return (T)LocalDate.parse(stringValue, FROM_YYYY_MM_DD); } else if (type == Double.class) { return (T)Double.valueOf(stringValue); } else { return (T)stringValue; } }
3.68
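An alternative shape for recordValueToJavaType()'s if/else ladder is a class-keyed converter map; a self-contained sketch below (the joda-time LocalDate branch is omitted to stay dependency-free):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class StringToTypedDemo {
    // Map-based variant of recordValueToJavaType's if/else chain.
    static final Map<Class<?>, Function<String, ?>> CONVERTERS = new HashMap<>();
    static {
        CONVERTERS.put(Integer.class, Integer::valueOf);
        CONVERTERS.put(Long.class, Long::valueOf);
        CONVERTERS.put(Boolean.class, Boolean::valueOf);
        CONVERTERS.put(Double.class, Double::valueOf);
    }

    @SuppressWarnings("unchecked")
    static <T> T convert(String value, Class<T> type) {
        Function<String, ?> converter = CONVERTERS.get(type);
        // Like the snippet, fall back to the raw string for unknown types.
        return converter == null ? (T) value : (T) converter.apply(value);
    }

    public static void main(String[] args) {
        Integer i = convert("42", Integer.class);
        Boolean b = convert("true", Boolean.class);
        System.out.println(i + " " + b); // 42 true
    }
}
```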