Columns: name (string, length 12 to 178), code_snippet (string, length 8 to 36.5k), score (float64, range 3.26 to 3.68)
hbase_InfoServer_canUserModifyUI
/** * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled, * and the requesting user is defined as an administrator. If the UI is set to readonly, this * method always returns false. */ public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, Configuration conf) { if (conf.getBoolean("hbase.master.ui.readonly", false)) { return false; } String remoteUser = req.getRemoteUser(); if ( "kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null ) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); } return false; }
3.68
pulsar_BrokerService_setupTopicPublishRateLimiterMonitor
/** * Schedules and monitors publish-throttling for all owned topics that have publish-throttling configured. It also * stops the publish-rate-limiter monitor task if the broker disables publish-throttling. */ public void setupTopicPublishRateLimiterMonitor() { // set topic PublishRateLimiterMonitor long topicTickTimeMs = pulsar().getConfiguration().getTopicPublisherThrottlingTickTimeMillis(); if (topicTickTimeMs > 0) { topicPublishRateLimiterMonitor.startOrUpdate(topicTickTimeMs, this::checkTopicPublishThrottlingRate, this::refreshTopicPublishRate); } else { // disable publish-throttling for all topics topicPublishRateLimiterMonitor.stop(); } }
3.68
flink_AfterMatchSkipStrategy_skipToLast
/** * Discards every partial match that started before the last event of the emitted match mapped to * *PatternName*. * * @param patternName the pattern name to skip to * @return the created AfterMatchSkipStrategy */ public static SkipToLastStrategy skipToLast(String patternName) { return new SkipToLastStrategy(patternName, false); }
3.68
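A hedged usage sketch for skipToLast, assuming a recent Flink CEP API (SimpleCondition.of); the Event type, its getPrice accessor, and the pattern names are illustrative, not from the source:

import org.apache.flink.cep.nfa.aftermatch.AfterMatchSkipStrategy;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

// Attach the skip strategy when the pattern sequence is declared; after a match is
// emitted, partial matches older than the last "start" event are discarded.
AfterMatchSkipStrategy skipToLastStart = AfterMatchSkipStrategy.skipToLast("start");
Pattern<Event, ?> pattern = Pattern.<Event>begin("start", skipToLastStart)
    .where(SimpleCondition.of(e -> e.getPrice() > 10.0))
    .next("end")
    .where(SimpleCondition.of(e -> e.getPrice() < 10.0));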
flink_HiveSetProcessor_isHidden
/* * Checks if the key contains any of the PASSWORD_STRINGS and if yes * returns true */ private static boolean isHidden(String key) { for (String p : PASSWORD_STRINGS) { if (key.toLowerCase().contains(p)) { return true; } } return false; }
3.68
hbase_ZKUtil_createAndWatch
/** * Creates the specified node with the specified data and watches it. * <p> * Throws an exception if the node already exists. * <p> * The node created is persistent and open access. * <p> * Returns the version number of the created node if successful. * @param zkw zk reference * @param znode path of node to create * @param data data of node to create * @return version of node created * @throws KeeperException if unexpected zookeeper exception * @throws KeeperException.NodeExistsException if node already exists */ public static int createAndWatch(ZKWatcher zkw, String znode, byte[] data) throws KeeperException, KeeperException.NodeExistsException { try { zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), CreateMode.PERSISTENT); Stat stat = zkw.getRecoverableZooKeeper().exists(znode, zkw); if (stat == null) { // Likely a race condition. Someone deleted the znode. throw KeeperException.create(KeeperException.Code.SYSTEMERROR, "ZK.exists returned null (i.e.: znode does not exist) for znode=" + znode); } return stat.getVersion(); } catch (InterruptedException e) { zkw.interruptedException(e); return -1; } }
3.68
zxing_Detector_getColor
/** * Gets the color of a segment * * @return 1 if the segment is more than 90% black, -1 if the segment is more than 90% white, 0 otherwise */ private int getColor(Point p1, Point p2) { float d = distance(p1, p2); if (d == 0.0f) { return 0; } float dx = (p2.getX() - p1.getX()) / d; float dy = (p2.getY() - p1.getY()) / d; int error = 0; float px = p1.getX(); float py = p1.getY(); boolean colorModel = image.get(p1.getX(), p1.getY()); int iMax = (int) Math.floor(d); for (int i = 0; i < iMax; i++) { if (image.get(MathUtils.round(px), MathUtils.round(py)) != colorModel) { error++; } px += dx; py += dy; } float errRatio = error / d; if (errRatio > 0.1f && errRatio < 0.9f) { return 0; } return (errRatio <= 0.1f) == colorModel ? 1 : -1; }
3.68
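A worked sketch of the thresholds above, assuming a segment of length d = 10 pixels whose start pixel is black (colorModel = true):

// 0 mismatches  -> errRatio = 0.0 <= 0.1, matches colorModel     -> returns  1 (black segment)
// 10 mismatches -> errRatio = 1.0 >= 0.9, opposite of colorModel -> returns -1 (white segment)
// 5 mismatches  -> errRatio = 0.5 falls inside (0.1, 0.9)        -> returns  0 (mixed)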
flink_DelimitedInputFormat_loadGlobalConfigParams
/** @deprecated Please use {@code loadConfigParameters(Configuration config)} */ @Deprecated protected static void loadGlobalConfigParams() { loadConfigParameters(GlobalConfiguration.loadConfiguration()); }
3.68
pulsar_ProducerConfiguration_isEncryptionEnabled
/** * Returns true if encryption keys are added. */ public boolean isEncryptionEnabled() { return conf.isEncryptionEnabled(); }
3.68
framework_AbstractContainer_addPropertySetChangeListener
/** * Implementation of the corresponding method in * {@link PropertySetChangeNotifier}, override with the corresponding public * method and implement the interface to use this. * * @see PropertySetChangeNotifier#addListener(Container.PropertySetChangeListener) */ protected void addPropertySetChangeListener( Container.PropertySetChangeListener listener) { if (getPropertySetChangeListeners() == null) { setPropertySetChangeListeners( new LinkedList<Container.PropertySetChangeListener>()); } getPropertySetChangeListeners().add(listener); }
3.68
hadoop_DatanodeVolumeInfo_getFreeSpace
/** * get free space. */ public long getFreeSpace() { return freeSpace; }
3.68
hbase_FSTableDescriptors_getByNamespace
/** * Find descriptors by namespace. * @see #get(org.apache.hadoop.hbase.TableName) */ @Override public Map<String, TableDescriptor> getByNamespace(String name) throws IOException { Map<String, TableDescriptor> htds = new TreeMap<>(); List<Path> tableDirs = FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name)); for (Path d : tableDirs) { TableDescriptor htd = get(CommonFSUtils.getTableName(d)); if (htd == null) { continue; } htds.put(CommonFSUtils.getTableName(d).getNameAsString(), htd); } return htds; }
3.68
hbase_ConnectionUtils_createCloseRowBefore
/** * Create a row before the specified row and very close to the specified row. */ static byte[] createCloseRowBefore(byte[] row) { if (row.length == 0) { return MAX_BYTE_ARRAY; } if (row[row.length - 1] == 0) { return Arrays.copyOf(row, row.length - 1); } else { byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length]; System.arraycopy(row, 0, nextRow, 0, row.length - 1); nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1); System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length); return nextRow; } }
3.68
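A worked sketch of the three branches in createCloseRowBefore, assuming MAX_BYTE_ARRAY is a run of nine 0xff bytes (its actual length is an implementation detail of ConnectionUtils):

byte[] empty = {};            // -> MAX_BYTE_ARRAY (sorts after every non-empty row)
byte[] trailingZero = {1, 0}; // -> {1}: the trailing zero byte is dropped
byte[] ordinary = {1, 2};     // -> {1, 1, 0xff x 9}: last byte decremented, then padded with 0xff,
                              //    so the result sorts after every row below {1, 2} but just before it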
hbase_MetricsTableRequests_updateAppend
/** * Update the Append time histogram. * @param time time it took * @param blockBytesScanned size of block bytes scanned to retrieve the response */ public void updateAppend(long time, long blockBytesScanned) { if (isEnableTableLatenciesMetrics()) { appendTimeHistogram.update(time); if (blockBytesScanned > 0) { blockBytesScannedCount.increment(blockBytesScanned); appendBlockBytesScanned.update(blockBytesScanned); } } }
3.68
starts_JavaAgent_agentmain
/** * This method is invoked if we start the agent after the VM already started. * We use this method to hijack the surefire instance being run, so that we * can set its argLine correctly all the time. * * @param options The options that we pass to the agent * @param instrumentation The instrumentation instance */ public static void agentmain(String options, Instrumentation instrumentation) { instrumentation.addTransformer(new MavenCFT(), true); instrumentMaven(instrumentation); }
3.68
morf_SqlDialect_deleteAllFromTableStatements
/** * Creates SQL to delete all records from a table (doesn't use truncate). * * @param table the database table to clear * @return SQL statements required to clear the table. */ public Collection<String> deleteAllFromTableStatements(Table table) { return ImmutableList.of("DELETE FROM " + schemaNamePrefix(table) + table.getName()); }
3.68
hbase_StoreFileListFile_update
/** * We will set the timestamp in this method so just pass the builder in */ void update(StoreFileList.Builder builder) throws IOException { if (nextTrackFile < 0) { // we need to call load first to load the prevTimestamp and also the next file // we are already in the update method, which is not read only, so pass false load(false); } long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime()); byte[] actualData = builder.setTimestamp(timestamp).build().toByteArray(); CRC32 crc32 = new CRC32(); crc32.update(actualData); int checksum = (int) crc32.getValue(); // 4 bytes length at the beginning, plus 4 bytes checksum FileSystem fs = ctx.getRegionFileSystem().getFileSystem(); try (FSDataOutputStream out = fs.create(trackFiles[nextTrackFile], true)) { out.writeInt(actualData.length); out.write(actualData); out.writeInt(checksum); } // record timestamp prevTimestamp = timestamp; // rotate the file nextTrackFile = 1 - nextTrackFile; try { fs.delete(trackFiles[nextTrackFile], false); } catch (IOException e) { // we will create new file with overwrite = true, so not a big deal here, only for speed up // loading as we do not need to read this file when loading LOG.debug("Failed to delete old track file {}, ignoring the exception", trackFiles[nextTrackFile], e); } }
3.68
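A hedged read-side sketch of the record layout written above: a 4-byte big-endian length, the protobuf payload, then a 4-byte CRC32 of the payload. The method name is illustrative; the real loader is StoreFileListFile.load.

import java.io.DataInputStream;
import java.io.IOException;
import java.util.zip.CRC32;

static byte[] readTrackFileRecord(DataInputStream in) throws IOException {
    int len = in.readInt();              // 4-byte length prefix
    byte[] payload = new byte[len];
    in.readFully(payload);               // the serialized StoreFileList
    int expectedChecksum = in.readInt(); // 4-byte CRC32 trailer
    CRC32 crc32 = new CRC32();
    crc32.update(payload);
    if ((int) crc32.getValue() != expectedChecksum) {
        throw new IOException("Checksum mismatch, track file record is corrupted");
    }
    return payload;
}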
hbase_AsyncNonMetaRegionLocator_onScanNext
// return whether we should stop the scan private boolean onScanNext(TableName tableName, LocateRequest req, Result result) { RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result); if (LOG.isDebugEnabled()) { LOG.debug("The fetched location of '{}', row='{}', locateType={} is {}", tableName, Bytes.toStringBinary(req.row), req.locateType, locs); } // remove HRegionLocation with null location, i.e., getServerName returns null. if (locs != null) { locs = locs.removeElementsWithNullLocation(); } // the default region location should always be present when fetching from meta, otherwise // let's fail the request. if (locs == null || locs.getDefaultRegionLocation() == null) { complete(tableName, req, null, new HBaseIOException(String.format("No location found for '%s', row='%s', locateType=%s", tableName, Bytes.toStringBinary(req.row), req.locateType))); return true; } HRegionLocation loc = locs.getDefaultRegionLocation(); RegionInfo info = loc.getRegion(); if (info == null) { complete(tableName, req, null, new HBaseIOException(String.format("HRegionInfo is null for '%s', row='%s', locateType=%s", tableName, Bytes.toStringBinary(req.row), req.locateType))); return true; } if (info.isSplitParent()) { return false; } complete(tableName, req, locs, null); return true; }
3.68
framework_VTabsheet_getFirstVisibleTab
/** * Returns the index of the first visible tab on the server. * * @return the index, or {@code -1} if not found */ private int getFirstVisibleTab() { return getNextVisibleTab(-1); }
3.68
hadoop_V2Migration_v1RequestHandlersUsed
/** * Notes use of request handlers. * @param handlers handlers declared */ public static void v1RequestHandlersUsed(final String handlers) { WARN_OF_REQUEST_HANDLERS.warn( "Ignoring V1 SDK request handlers set in {}: {}", AUDIT_REQUEST_HANDLERS, handlers); }
3.68
hadoop_FederationProtocolPBTranslator_setProto
/** * Called if this translator is to be created from an existing protobuf byte * stream. * * @param p The existing proto object to use to initialize the translator. * @throws IllegalArgumentException If the given proto message is not instance of the class of * the proto handler this translator holds. */ @SuppressWarnings("unchecked") public void setProto(Message p) { if (protoClass.isInstance(p)) { if (this.builder != null) { // Merge with builder this.builder.mergeFrom((P) p); } else { // Store proto this.proto = (P) p; } } else { throw new IllegalArgumentException( "Cannot decode proto type " + p.getClass().getName()); } }
3.68
flink_ConnectionUtils_tryLocalHostBeforeReturning
/** * This utility method tries to connect to the JobManager using the InetAddress returned by * InetAddress.getLocalHost(). The purpose of the utility is to have a final try connecting to * the target address using the LocalHost before using the address returned. We do a second try * because the JM might have been unavailable during the first check. * * @param preliminaryResult The address detected by the heuristic * @return either the preliminaryResult or the address returned by InetAddress.getLocalHost() * (if we are able to connect to targetAddress from there) */ private static InetAddress tryLocalHostBeforeReturning( InetAddress preliminaryResult, SocketAddress targetAddress, boolean logging) throws IOException { InetAddress localhostName = InetAddress.getLocalHost(); if (preliminaryResult.equals(localhostName)) { // preliminary result is equal to the local host name return preliminaryResult; } else if (tryToConnect( localhostName, targetAddress, AddressDetectionState.SLOW_CONNECT.getTimeout(), logging)) { // success, we were able to use local host to connect LOG.debug( "Preferring {} (InetAddress.getLocalHost()) for local bind point over previous candidate {}", localhostName, preliminaryResult); return localhostName; } else { // we have to make the preliminary result the final result return preliminaryResult; } }
3.68
morf_AbstractSqlDialectTest_expectedHints2
/** * @param rowCount The number of rows for which to optimise the query plan. * @return The expected SQL for the {@link SelectStatement#optimiseForRowCount(int)} directive. */ protected String expectedHints2(@SuppressWarnings("unused") int rowCount) { return "SELECT a, b FROM " + tableName("Foo") + " ORDER BY a FOR UPDATE"; }
3.68
rocketmq-connect_WorkerSinkTask_onCommitCompleted
/** * Invoked when an offset commit completes. * * @param error error raised during the commit, or null if it succeeded * @param seqno sequence number of the commit * @param committedOffsets offsets that were committed, keyed by message queue */ private void onCommitCompleted(Throwable error, long seqno, Map<MessageQueue, Long> committedOffsets) { if (commitSeqno != seqno) { // skip this commit sinkTaskMetricsGroup.recordOffsetCommitSkip(); return; } if (error != null) { log.error("{} An exception was thrown when committing commit offset, sequence number {}: {}", this, seqno, committedOffsets, error); recordCommitFailure(System.currentTimeMillis() - commitStarted); } else { log.debug("{} Finished offset commit successfully in {} ms for sequence number {}: {}", this, System.currentTimeMillis() - commitStarted, seqno, committedOffsets); if (committedOffsets != null) { lastCommittedOffsets.putAll(committedOffsets); log.debug("{} Last committed offsets are now {}", this, committedOffsets); } sinkTaskMetricsGroup.recordOffsetCommitSuccess(); } committing = false; }
3.68
hadoop_ManifestSuccessData_getFilenames
/** * @return a list of filenames in the commit. */ public List<String> getFilenames() { return filenames; }
3.68
hadoop_AppToFlowRowKey_getRowKey
/** * Constructs a row key prefix for the app_flow table. * * @return byte array with the row key */ public byte[] getRowKey() { return appIdKeyConverter.encode(appId); }
3.68
flink_ScalaCsvOutputFormat_toString
// -------------------------------------------------------------------------------------------- @Override public String toString() { return "CsvOutputFormat (path: " + this.getOutputFilePath() + ", delimiter: " + this.fieldDelimiter + ")"; }
3.68
hbase_FlushSnapshotSubprocedure_releaseBarrier
/** * Hooray! */ public void releaseBarrier() { // NO OP }
3.68
framework_CalendarComponentEvents_getNewStartTime
/** * @deprecated Use {@link #getNewStart()} instead * * @return the new start time */ @Deprecated public Date getNewStartTime() { return startTime; }
3.68
hbase_LruAdaptiveBlockCache_clearCache
/** Clears the cache. Used in tests. */ public void clearCache() { this.map.clear(); this.elements.set(0); }
3.68
framework_StreamResource_setFilename
/** * Sets the filename. * * @param filename * the filename to set. */ public void setFilename(String filename) { this.filename = filename; }
3.68
framework_ContainerOrderedWrapper_getContainerPropertyIds
/* * Gets the ID's of all Properties stored in the Container Don't add a * JavaDoc comment here, we use the default documentation from implemented * interface. */ @Override public Collection<?> getContainerPropertyIds() { return container.getContainerPropertyIds(); }
3.68
hbase_LruBlockCache_isEnteringRun
/** * Used for the test. */ boolean isEnteringRun() { return this.enteringRun; }
3.68
flink_Trigger_onMerge
/** * Called when several windows have been merged into one window by the {@link * org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}. * * @param window The new window that results from the merge. * @param ctx A context object that can be used to register timer callbacks and access state. */ public void onMerge(W window, OnMergeContext ctx) throws Exception { throw new UnsupportedOperationException("This trigger does not support merging."); }
3.68
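A minimal sketch of a merging-capable trigger that overrides onMerge instead of inheriting the throwing default, assuming the standard Trigger API and mirroring how an end-of-window trigger typically handles merges:

import org.apache.flink.streaming.api.windowing.triggers.Trigger;
import org.apache.flink.streaming.api.windowing.triggers.TriggerResult;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

public class MergingEndOfWindowTrigger extends Trigger<Object, TimeWindow> {
    @Override
    public TriggerResult onElement(Object element, long timestamp, TimeWindow window, TriggerContext ctx) {
        ctx.registerEventTimeTimer(window.maxTimestamp());
        return TriggerResult.CONTINUE;
    }
    @Override
    public TriggerResult onEventTime(long time, TimeWindow window, TriggerContext ctx) {
        return time == window.maxTimestamp() ? TriggerResult.FIRE : TriggerResult.CONTINUE;
    }
    @Override
    public TriggerResult onProcessingTime(long time, TimeWindow window, TriggerContext ctx) {
        return TriggerResult.CONTINUE;
    }
    @Override
    public void clear(TimeWindow window, TriggerContext ctx) {
        ctx.deleteEventTimeTimer(window.maxTimestamp());
    }
    @Override
    public boolean canMerge() {
        return true; // signals the WindowAssigner that onMerge is supported
    }
    @Override
    public void onMerge(TimeWindow window, OnMergeContext ctx) {
        // Re-register the timer for the merged window's end instead of throwing.
        ctx.registerEventTimeTimer(window.maxTimestamp());
    }
}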
framework_SQLContainer_removeListener
/** * @deprecated As of 7.0, replaced by * {@link #removeRowIdChangeListener(RowIdChangeListener)} */ @Deprecated public void removeListener(RowIdChangeListener listener) { removeRowIdChangeListener(listener); }
3.68
hbase_HttpServer_getParameterNames
/** * Return the set of parameter names, quoting each name. */ @Override public Enumeration<String> getParameterNames() { return new Enumeration<String>() { private Enumeration<String> rawIterator = rawRequest.getParameterNames(); @Override public boolean hasMoreElements() { return rawIterator.hasMoreElements(); } @Override public String nextElement() { return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement()); } }; }
3.68
hbase_BlockCache_isAlreadyCached
/** * Checks whether the block for the passed key is already cached. This method may not be * overridden by all implementing classes. In such cases, the returned Optional will be empty. For * subclasses implementing this logic, the returned Optional would contain the boolean value * reflecting if the block for the passed key is already cached or not. * @param key for the block we want to check if it's already in the cache. * @return empty optional if this method is not supported, otherwise the returned optional * contains the boolean value informing if the block is already cached. */ default Optional<Boolean> isAlreadyCached(BlockCacheKey key) { return Optional.empty(); }
3.68
hbase_CellModel_hasUserTimestamp
/** Returns true if the timestamp property has been specified by the user */ public boolean hasUserTimestamp() { return timestamp != HConstants.LATEST_TIMESTAMP; }
3.68
flink_StreamElement_asWatermark
/** * Casts this element into a Watermark. * * @return This element as a Watermark. * @throws java.lang.ClassCastException Thrown, if this element is actually not a Watermark. */ public final Watermark asWatermark() { return (Watermark) this; }
3.68
hbase_IdentityTableMapper_initJob
/** * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table name. * @param scan The scan with the columns to scan. * @param mapper The mapper class. * @param job The job configuration. * @throws IOException When setting up the job fails. */ @SuppressWarnings("rawtypes") public static void initJob(String table, Scan scan, Class<? extends TableMapper> mapper, Job job) throws IOException { TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, Result.class, job); }
3.68
hbase_HBaseCommonTestingUtility_getDataTestDir
/** * @param name the name of a subdirectory or file in the test data directory * @return Path to a subdirectory or file named {@code name} under {@link #getDataTestDir()}. * Does *NOT* create the directory or file if it does not exist. */ public Path getDataTestDir(final String name) { return new Path(getDataTestDir(), name); }
3.68
hadoop_ManifestCommitter_hasCapability
/** * The committer is compatible with spark's dynamic partitioning * algorithm. * @param capability string to query the stream support for. * @return true if the requested capability is supported. */ @Override public boolean hasCapability(final String capability) { return CAPABILITY_DYNAMIC_PARTITIONING.equals(capability); }
3.68
hbase_CompactionRequestImpl_setPriority
/** Sets the priority for the request */ public void setPriority(int p) { this.priority = p; }
3.68
cron-utils_FieldDefinitionBuilder_withStrictRange
/** * Specifies that the defined range for the given field must be a strict range. * A strict range is defined as: "lowValue-highValue". * If a reversed range such as "highValue-lowValue" is specified in a field, parsing of the field will fail. * * @return same FieldDefinitionBuilder instance */ public FieldDefinitionBuilder withStrictRange() { constraints.withStrictRange(); return this; }
3.68
dubbo_BasicJsonWriter_indent
/** * Increase the indentation level. */ private IndentingWriter indent() { this.level++; return refreshIndent(); }
3.68
flink_ClientUtils_reportHeartbeatPeriodically
/** * The client reports the heartbeat to the dispatcher for aliveness. * * @param jobClient The job client. * @param interval The heartbeat interval. * @param timeout The heartbeat timeout. * @return The ScheduledExecutorService which reports heartbeat periodically. */ public static ScheduledExecutorService reportHeartbeatPeriodically( JobClient jobClient, long interval, long timeout) { checkArgument( interval < timeout, "The client's heartbeat interval " + "should be less than the heartbeat timeout. Please adjust the param '" + ClientOptions.CLIENT_HEARTBEAT_INTERVAL + "' or '" + ClientOptions.CLIENT_HEARTBEAT_TIMEOUT + "'"); JobID jobID = jobClient.getJobID(); LOG.info("Begin to report client's heartbeat for the job {}.", jobID); ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleAtFixedRate( () -> { LOG.debug("Report client's heartbeat for the job {}.", jobID); jobClient.reportHeartbeat(System.currentTimeMillis() + timeout); }, interval, interval, TimeUnit.MILLISECONDS); return scheduledExecutor; }
3.68
hbase_AbstractHBaseTool_doStaticMain
/** Call this from the concrete tool class's main function. */ protected void doStaticMain(String args[]) { int ret; try { ret = ToolRunner.run(HBaseConfiguration.create(), this, args); } catch (Exception ex) { LOG.error("Error running command-line tool", ex); ret = EXIT_FAILURE; } System.exit(ret); }
3.68
hadoop_AbstractS3ACommitter_getTaskCommitThreadCount
/** * Get the thread count for this task's commit operations. * @param context the JobContext for this commit * @return a possibly zero thread count. */ private int getTaskCommitThreadCount(final JobContext context) { return context.getConfiguration().getInt( FS_S3A_COMMITTER_THREADS, DEFAULT_COMMITTER_THREADS); }
3.68
pulsar_ShadedJCloudsUtils_addStandardModules
/** * Setup standard modules. * @param builder the builder */ public static void addStandardModules(ContextBuilder builder) { List<AbstractModule> modules = new ArrayList<>(); modules.add(new SLF4JLoggingModule()); if (ENABLE_OKHTTP_MODULE) { modules.add(new OkHttpCommandExecutorServiceModule()); } else if (ENABLE_APACHE_HC_MODULE) { modules.add(new ApacheHCHttpCommandExecutorServiceModule()); } builder.modules(modules); }
3.68
flink_RestfulGateway_getTriggeredCheckpointStatus
/** * Get the status of a checkpoint triggered under the specified operation key. * * @param operationKey key of the operation * @return Future which completes immediately with the status, or fails if no operation is * registered for the key */ default CompletableFuture<OperationResult<Long>> getTriggeredCheckpointStatus( AsynchronousJobOperationKey operationKey) { throw new UnsupportedOperationException(); }
3.68
morf_AbstractSqlDialectTest_testSelectWithRowNumberFunction
/** * Test the row number function in a select. */ @Test public void testSelectWithRowNumberFunction() { SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD), rowNumber()) .from(new TableReference(ALTERNATE_TABLE)) .groupBy(new FieldReference(STRING_FIELD)); String expectedSql = "SELECT stringField, " + expectedRowNumber() + " FROM " + tableName(ALTERNATE_TABLE) + " GROUP BY stringField"; assertEquals("Select with row number function", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
hbase_ZKUtil_listChildrenBFSAndWatchThem
/** * BFS Traversal of all the children under path, with the entries in the list, in the same order * as that of the traversal. Lists all the children and sets watches on them. * @param zkw zk reference * @param znode path of node * @return list of children znodes under the path * @throws KeeperException if unexpected ZooKeeper exception */ private static List<String> listChildrenBFSAndWatchThem(ZKWatcher zkw, final String znode) throws KeeperException { Deque<String> queue = new LinkedList<>(); List<String> tree = new ArrayList<>(); queue.add(znode); while (true) { String node = queue.pollFirst(); if (node == null) { break; } List<String> children = listChildrenAndWatchThem(zkw, node); if (children == null) { continue; } for (final String child : children) { final String childPath = node + "/" + child; queue.add(childPath); tree.add(childPath); } } return tree; }
3.68
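An illustrative traversal order for the BFS above, given a small hypothetical znode tree:

// znode tree: /a has children /a/b and /a/c; /a/b has child /a/b/d
// listChildrenBFSAndWatchThem(zkw, "/a") returns, in order:
//   ["/a/b", "/a/c", "/a/b/d"]   (level by level; the root itself is excluded)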
morf_SqlDialect_withDeterministicName
/** * For testing only - the tableName might not be appropriate for your * dialect! The table will be a temporary table, specific to the dialect. * * @param tableName table name for the id table. * @return {@link IdTable}. */ public static IdTable withDeterministicName(String tableName) { return new IdTable(tableName, true); }
3.68
hmily_GrpcHmilyContext_getHmilyClass
/** * Get hmilyClass context. * * @return ThreadLocal */ public static ThreadLocal<GrpcInvokeContext> getHmilyClass() { return hmilyClass; }
3.68
framework_Tree_addExpandListener
/** * Adds the expand listener. * * @param listener * the Listener to be added. */ public void addExpandListener(ExpandListener listener) { addListener(ExpandEvent.class, listener, ExpandListener.EXPAND_METHOD); }
3.68
streampipes_SpServiceDefinition_addMigrators
/** * Add a list of migrations to the service definition. * This inherently checks for duplicates and sorts the migrations such that * migrations affecting lower versions always come first. * * @param migrators migrators to add */ public void addMigrators(List<IModelMigrator<?, ?>> migrators) { for (var migratorToAdd : migrators) { if (this.migrators.stream().noneMatch(migrator -> MigrationComparison.isEqual(migrator, migratorToAdd))) { this.migrators.add(migratorToAdd); } } Collections.sort(this.migrators); }
3.68
flink_AsynchronousBlockReader_getNextReturnedBlock
/** * Gets the next memory segment that has been filled with data by the reader. This method blocks * until such a segment is available, or until an error occurs in the reader, or the reader is * closed. * * <p>WARNING: If this method is invoked without any segment ever returning (for example, * because the {@link #readBlock(MemorySegment)} method has not been invoked appropriately), the * method may block forever. * * @return The next memory segment from the reader's return queue. * @throws IOException Thrown, if an I/O error occurs in the reader while waiting for the * request to return. */ @Override public MemorySegment getNextReturnedBlock() throws IOException { try { while (true) { final MemorySegment next = this.returnSegments.poll(1000, TimeUnit.MILLISECONDS); if (next != null) { return next; } else { if (this.closed) { throw new IOException("The reader has been asynchronously closed."); } checkErroneous(); } } } catch (InterruptedException iex) { throw new IOException( "Reader was interrupted while waiting for the next returning segment."); } }
3.68
flink_ThreadSafeSimpleCounter_getCount
/** * Returns the current count. * * @return current count */ @Override public long getCount() { return longAdder.longValue(); }
3.68
hadoop_PlacementConstraints_and
/** * A conjunction of constraints. * * @param children the children constraints that should all be satisfied * @return the resulting placement constraint */ public static And and(AbstractConstraint... children) { return new And(children); }
3.68
flink_TableFactoryService_findAllInternal
/** * Finds a table factory of the given class, property map, and classloader. * * @param factoryClass desired factory class * @param properties properties that describe the factory configuration * @param classLoader classloader for service loading * @param <T> factory class type * @return the matching factory */ private static <T extends TableFactory> List<T> findAllInternal( Class<T> factoryClass, Map<String, String> properties, Optional<ClassLoader> classLoader) { List<TableFactory> tableFactories = discoverFactories(classLoader); return filter(tableFactories, factoryClass, properties); }
3.68
hudi_CompactionCommand_readCompactionPlanForActiveTimeline
/** * TBD Can we make this part of HoodieActiveTimeline or a utility class. */ private HoodieCompactionPlan readCompactionPlanForActiveTimeline(HoodieActiveTimeline activeTimeline, HoodieInstant instant) { try { if (!HoodieTimeline.COMPACTION_ACTION.equals(instant.getAction())) { try { // This could be a completed compaction. Assume a compaction request file is present but skip if fails return TimelineMetadataUtils.deserializeCompactionPlan( activeTimeline.readCompactionPlanAsBytes( HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get()); } catch (HoodieIOException ioe) { // SKIP return null; } } else { return TimelineMetadataUtils.deserializeCompactionPlan(activeTimeline.readCompactionPlanAsBytes( HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get()); } } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); } }
3.68
hadoop_Trash_expunge
/** * Delete old checkpoint(s). * @throws IOException raised on errors performing I/O. */ public void expunge() throws IOException { trashPolicy.deleteCheckpoint(); }
3.68
framework_VTabsheet_setCloseHandler
/** * Sets the close handler for this tab. This handler should be called * whenever closing of a tab is requested (by clicking the close button * or pressing the close key). * * @param closeHandler * the close handler * * @see VTabsheet#getCloseTabKey() */ public void setCloseHandler(VCloseHandler closeHandler) { this.closeHandler = closeHandler; }
3.68
hadoop_BaseRecord_getDeletionMs
/** * Get the deletion time for the expired record. The default is disabled. * Override for customized behavior. * * @return Deletion time for the expired record. */ public long getDeletionMs() { return -1; }
3.68
dubbo_UrlUtils_preferSerialization
/** * Gets the preferred serializations configured on the URL, in priority order. * * @param url url * @return {@link List}<{@link String}> of serialization names, empty if none configured */ public static List<String> preferSerialization(URL url) { String preferSerialization = url.getParameter(PREFER_SERIALIZATION_KEY); if (StringUtils.isNotBlank(preferSerialization)) { return Collections.unmodifiableList(StringUtils.splitToList(preferSerialization, ',')); } return Collections.emptyList(); }
3.68
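A hedged usage sketch, assuming PREFER_SERIALIZATION_KEY resolves to the "prefer_serialization" URL parameter (the key name and serialization names below are assumptions):

// The parameter holds a comma-separated priority list.
URL url = URL.valueOf("dubbo://127.0.0.1:20880/DemoService?prefer_serialization=fastjson2,hessian2");
List<String> preferred = UrlUtils.preferSerialization(url); // ["fastjson2", "hessian2"]
List<String> none = UrlUtils.preferSerialization(URL.valueOf("dubbo://127.0.0.1:20880/DemoService")); // []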
hbase_RegionStates_getServerNode
/** Returns Pertinent ServerStateNode or NULL if none found (Do not make modifications). */ public ServerStateNode getServerNode(final ServerName serverName) { return serverMap.get(serverName); }
3.68
hbase_CompatibilitySingletonFactory_getInstance
/** * Get the singleton instance of any class defined by the compatibility jars. * @return the singleton */ @SuppressWarnings("unchecked") public static <T> T getInstance(Class<T> klass) { synchronized (SingletonStorage.INSTANCE.lock) { T instance = (T) SingletonStorage.INSTANCE.instances.get(klass); if (instance == null) { try { ServiceLoader<T> loader = ServiceLoader.load(klass); Iterator<T> it = loader.iterator(); instance = it.next(); if (it.hasNext()) { StringBuilder msg = new StringBuilder(); msg.append("ServiceLoader provided more than one implementation for class: ") .append(klass).append(", using implementation: ").append(instance.getClass()) .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } msg.append("}"); LOG.warn(msg.toString()); } } catch (Exception e) { throw new RuntimeException(createExceptionString(klass), e); } catch (Error e) { throw new RuntimeException(createExceptionString(klass), e); } // If there was nothing returned and no exception then throw an exception. if (instance == null) { throw new RuntimeException(createExceptionString(klass)); } SingletonStorage.INSTANCE.instances.put(klass, instance); } return instance; } }
3.68
hbase_RequestConverter_buildGetRegionLoadRequest
/** * Create a protocol buffer GetRegionLoadRequest for all regions/regions of a table. * @param tableName the table for which regionLoad should be obtained from RS * @return a protocol buffer GetRegionLoadRequest */ public static GetRegionLoadRequest buildGetRegionLoadRequest(final TableName tableName) { GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder(); if (tableName != null) { builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); } return builder.build(); }
3.68
framework_ConnectorTracker_isWritingResponse
/** * Checks whether the response is currently being written. Connectors can * not be marked as dirty when a response is being written. * * @see #setWritingResponse(boolean) * @see #markDirty(ClientConnector) * * @return <code>true</code> if the response is currently being written, * <code>false</code> if outside the response writing phase. */ public boolean isWritingResponse() { return writingResponse; }
3.68
hbase_StripeStoreFileManager_findStripeForRow
/** * Finds the stripe index for the stripe containing a row provided externally for get/scan. */ private final int findStripeForRow(byte[] row, boolean isStart) { if (isStart && Arrays.equals(row, HConstants.EMPTY_START_ROW)) return 0; if (!isStart && Arrays.equals(row, HConstants.EMPTY_END_ROW)) { return state.stripeFiles.size() - 1; } // If there's an exact match below, a stripe ends at "row". Stripe right boundary is // exclusive, so that means the row is in the next stripe; thus, we need to add one to index. // If there's no match, the return value of binarySearch is (-(insertion point) - 1), where // insertion point is the index of the next greater element, or list size if none. The // insertion point happens to be exactly what we need, so we need to add one to the result. return Math.abs(Arrays.binarySearch(state.stripeEndRows, row, Bytes.BYTES_COMPARATOR) + 1); }
3.68
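A worked sketch of the binarySearch arithmetic in findStripeForRow, with two illustrative stripe end rows "b" and "d" (three stripes: [start,"b"), ["b","d"), ["d",end)):

// binarySearch("b") returns  0 (exact match)       -> abs( 0 + 1) = 1: "b" starts the next stripe
// binarySearch("a") returns -1 (insertion point 0) -> abs(-1 + 1) = 0: first stripe
// binarySearch("c") returns -2 (insertion point 1) -> abs(-2 + 1) = 1: middle stripe
// binarySearch("e") returns -3 (insertion point 2) -> abs(-3 + 1) = 2: last stripe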
framework_VaadinService_getWrappedSession
/** * Retrieves the wrapped session for the request. * * @param request * The request for which to retrieve a session * @param requestCanCreateSession * true to create a new session if one currently does not exist * @return The retrieved (or created) wrapped session * @throws SessionExpiredException * If the request is not associated to a session and new session * creation is not allowed */ private WrappedSession getWrappedSession(VaadinRequest request, boolean requestCanCreateSession) throws SessionExpiredException { final WrappedSession session = request .getWrappedSession(requestCanCreateSession); if (session == null) { throw new SessionExpiredException(); } return session; }
3.68
hudi_MarkerDirState_processMarkerCreationRequests
/** * Processes pending marker creation requests. * * @param pendingMarkerCreationFutures futures of pending marker creation requests * @param fileIndex file index to use to write markers */ public void processMarkerCreationRequests( final List<MarkerCreationFuture> pendingMarkerCreationFutures, int fileIndex) { if (pendingMarkerCreationFutures.isEmpty()) { markFileAsAvailable(fileIndex); return; } LOG.debug("timeMs=" + System.currentTimeMillis() + " markerDirPath=" + markerDirPath + " numRequests=" + pendingMarkerCreationFutures.size() + " fileIndex=" + fileIndex); boolean shouldFlushMarkers = false; synchronized (markerCreationProcessingLock) { for (MarkerCreationFuture future : pendingMarkerCreationFutures) { String markerName = future.getMarkerName(); boolean exists = allMarkers.contains(markerName); if (!exists) { if (conflictDetectionStrategy.isPresent()) { try { conflictDetectionStrategy.get().detectAndResolveConflictIfNecessary(); } catch (HoodieEarlyConflictDetectionException he) { LOG.warn("Detected the write conflict due to a concurrent writer, " + "failing the marker creation as the early conflict detection is enabled", he); future.setResult(false); continue; } catch (Exception e) { LOG.warn("Failed to execute early conflict detection." + e.getMessage()); // When early conflict detection fails to execute, we still allow the marker creation // to continue addMarkerToMap(fileIndex, markerName); future.setResult(true); shouldFlushMarkers = true; continue; } } addMarkerToMap(fileIndex, markerName); shouldFlushMarkers = true; } future.setResult(!exists); } if (!isMarkerTypeWritten) { // Create marker directory and write marker type to MARKERS.type writeMarkerTypeToFile(); isMarkerTypeWritten = true; } } if (shouldFlushMarkers) { flushMarkersToFile(fileIndex); } markFileAsAvailable(fileIndex); for (MarkerCreationFuture future : pendingMarkerCreationFutures) { try { future.complete(jsonifyResult( future.getContext(), future.isSuccessful(), metricsRegistry, OBJECT_MAPPER, LOG)); } catch (JsonProcessingException e) { throw new HoodieException("Failed to JSON encode the value", e); } } }
3.68
hbase_HRegion_initialize
/** * Initialize this region. * @param reporter Tickle every so often if initialize is taking a while. * @return What the next sequence (edit) id should be. */ long initialize(final CancelableProgressable reporter) throws IOException { // Refuse to open the region if there is no column family in the table if (htableDescriptor.getColumnFamilyCount() == 0) { throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString() + " should have at least one column family."); } MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this, false, true); long nextSeqId = -1; try { nextSeqId = initializeRegionInternals(reporter, status); return nextSeqId; } catch (IOException e) { LOG.warn("Failed initialize of region= {}, starting to roll back memstore", getRegionInfo().getRegionNameAsString(), e); // global memstore size will be decreased when dropping memstore try { // drop the memory used by memstore if open region fails dropMemStoreContents(); } catch (IOException ioE) { if (conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) { LOG.warn( "Failed drop memstore of region= {}, " + "some chunks may not released forever since MSLAB is enabled", getRegionInfo().getRegionNameAsString()); } } if (metricsTableRequests != null) { metricsTableRequests.removeRegistry(); } throw e; } finally { // nextSeqid will be -1 if the initialization fails. // At least it will be 0 otherwise. if (nextSeqId == -1) { status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() + " initialization."); } if (LOG.isDebugEnabled()) { LOG.debug("Region open journal for {}:\n{}", this.getRegionInfo().getEncodedName(), status.prettyPrintJournal()); } status.cleanup(); } }
3.68
hadoop_AbstractLaunchableService_execute
/** * {@inheritDoc} * <p> * The action is to signal success by returning the exit code 0. */ @Override public int execute() throws Exception { return LauncherExitCodes.EXIT_SUCCESS; }
3.68
dubbo_RpcContextAttachment_getObjectAttachments
/** * get attachments. * * @return attachments */ @Override @Experimental("Experiment api for supporting Object transmission") public Map<String, Object> getObjectAttachments() { return attachments; }
3.68
cron-utils_Preconditions_format
/** * Substitutes each {@code %s} in {@code template} with an argument. These are matched by * position: the first {@code %s} gets {@code args[0]}, etc. If there are more arguments than * placeholders, the unmatched arguments will be appended to the end of the formatted message in * square braces. * * @param nullableTemplate a non-null string containing 0 or more {@code %s} placeholders. * @param args the arguments to be substituted into the message template. Arguments are converted * to strings using {@link String#valueOf(Object)}. Arguments can be null. */ // Note that this is somewhat-improperly used from Verify.java as well. private static String format(final String nullableTemplate, final Object... args) { final String template = String.valueOf(nullableTemplate); // null -> "null" // start substituting the arguments into the '%s' placeholders final StringBuilder builder = new StringBuilder(template.length() + 16 * args.length); int templateStart = 0; int i = 0; while (i < args.length) { final int placeholderStart = template.indexOf("%s", templateStart); if (placeholderStart == -1) { break; } builder.append(template.substring(templateStart, placeholderStart)); builder.append(args[i++]); templateStart = placeholderStart + 2; } builder.append(template.substring(templateStart)); // if we run out of placeholders, append the extra args in square braces if (i < args.length) { builder.append(" ["); builder.append(args[i++]); while (i < args.length) { builder.append(", "); builder.append(args[i++]); } builder.append(']'); } return builder.toString(); }
3.68
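Illustrative inputs and outputs for format, following the placeholder and extra-argument rules described above:

format("%s + %s = %s", 1, 2);        // -> "1 + 2 = %s"   (unmatched placeholder left as-is)
format("%s + %s", 1, 2, 3);          // -> "1 + 2 [3]"    (extra argument appended in square braces)
format("no placeholders", "x", "y"); // -> "no placeholders [x, y]"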
hadoop_SolverPreprocessor_mergeSkyline
/** * Merge different jobs' resource skylines into one within the same pipeline. * * @param resourceSkylines different jobs' resource skylines within the same * pipeline. * @return an aggregated resource skyline for the pipeline. */ public final ResourceSkyline mergeSkyline( final List<ResourceSkyline> resourceSkylines) { // TODO: // rewrite this function with shift and merge once YARN-5328 is committed /** First, get the pipeline submission time. */ long pipelineSubmission = Long.MAX_VALUE; for (int i = 0; i < resourceSkylines.size(); i++) { long jobSubmission = resourceSkylines.get(i).getJobSubmissionTime(); if (pipelineSubmission > jobSubmission) { pipelineSubmission = jobSubmission; } } final TreeMap<Long, Resource> resourceOverTime = new TreeMap<>(); final RLESparseResourceAllocation skylineListAgg = new RLESparseResourceAllocation(resourceOverTime, new DefaultResourceCalculator()); /** * Second, adjust different jobs' ResourceSkyline starting time based on * pipeline submission time, and merge them into one ResourceSkyline. */ for (int i = 0; i < resourceSkylines.size(); i++) { long jobSubmission = resourceSkylines.get(i).getJobSubmissionTime(); long diff = (jobSubmission - pipelineSubmission) / 1000; RLESparseResourceAllocation tmp = resourceSkylines.get(i).getSkylineList(); Object[] timePoints = tmp.getCumulative().keySet().toArray(); for (int j = 0; j < timePoints.length - 2; j++) { ReservationInterval riAdd = new ReservationInterval(toIntExact((long) timePoints[j]) + diff, toIntExact((long) timePoints[j + 1] + diff)); skylineListAgg.addInterval(riAdd, tmp.getCapacityAtTime(toIntExact((long) timePoints[j]))); } } ResourceSkyline skylineAgg = new ResourceSkyline(resourceSkylines.get(0).getJobId(), resourceSkylines.get(0).getJobInputDataSize(), resourceSkylines.get(0).getJobSubmissionTime(), resourceSkylines.get(0).getJobFinishTime(), resourceSkylines.get(0).getContainerSpec(), skylineListAgg); return skylineAgg; }
3.68
hadoop_RecordCreatorFactory_getHost
/** * Return the host name. * @return the host name. */ Name getHost() { return host; }
3.68
framework_VAbstractCalendarPanel_setAssistiveLabelPreviousYear
/** * Set assistive label for the previous year element. * * @param label * the label to set * @since 8.4 */ public void setAssistiveLabelPreviousYear(String label) { prevYearAssistiveLabel = label; }
3.68
hudi_AbstractTableFileSystemView_getAllFilesInPartition
/** * Returns all files situated at the given partition. */ private FileStatus[] getAllFilesInPartition(String relativePartitionPath) throws IOException { Path partitionPath = FSUtils.getPartitionPath(metaClient.getBasePathV2(), relativePartitionPath); long beginLsTs = System.currentTimeMillis(); FileStatus[] statuses = listPartition(partitionPath); long endLsTs = System.currentTimeMillis(); LOG.debug("#files found in partition (" + relativePartitionPath + ") =" + statuses.length + ", Time taken =" + (endLsTs - beginLsTs)); return statuses; }
3.68
shardingsphere-elasticjob_TransactionOperation_opUpdate
/** * Operation update. * * @param key key * @param value value * @return TransactionOperation */ public static TransactionOperation opUpdate(final String key, final String value) { return new TransactionOperation(Type.UPDATE, key, value); }
3.68
hibernate-validator_ValueExtractorResolver_getValueExtractorCandidatesForCascadedValidation
/** * Used to determine the value extractor candidates valid for a declared type and type variable. * <p> * The effective value extractor will be narrowed from these candidates using the runtime type. * <p> * Used to optimize the choice of the value extractor in the case of cascading validation. */ public Set<ValueExtractorDescriptor> getValueExtractorCandidatesForCascadedValidation(Type declaredType, TypeVariable<?> typeParameter) { Set<ValueExtractorDescriptor> valueExtractorDescriptors = new HashSet<>(); valueExtractorDescriptors.addAll( getRuntimeAndContainerElementCompliantValueExtractorsFromPossibleCandidates( declaredType, typeParameter, TypeHelper.getErasedReferenceType( declaredType ), registeredValueExtractors ) ); valueExtractorDescriptors.addAll( getPotentiallyRuntimeTypeCompliantAndContainerElementCompliantValueExtractors( declaredType, typeParameter ) ); return CollectionHelper.toImmutableSet( valueExtractorDescriptors ); }
3.68
hadoop_ClientRegistryBinder_getEndpoint
/** * Get an endpoint by API. * @param record service record * @param api API * @param external flag to indicate this is an external record * @return the endpoint or null */ public static Endpoint getEndpoint(ServiceRecord record, String api, boolean external) { return external ? record.getExternalEndpoint(api) : record.getInternalEndpoint(api); }
3.68
framework_CustomizedSystemMessages_setCookiesDisabledURL
/** * Sets the URL to redirect to when the browser has cookies disabled. * * @param cookiesDisabledURL * the URL to redirect to, or null to reload the current URL */ public void setCookiesDisabledURL(String cookiesDisabledURL) { this.cookiesDisabledURL = cookiesDisabledURL; }
3.68
flink_StateBootstrapTransformation_writeOperatorState
/** * @param operatorID The operator id for the stream operator. * @param stateBackend The state backend for the job. * @param config Additional configurations applied to the bootstrap stream tasks. * @param globalMaxParallelism Global max parallelism set for the savepoint. * @param savepointPath The path where the savepoint will be written. * @return The operator subtask states for this bootstrap transformation. */ DataStream<OperatorState> writeOperatorState( OperatorID operatorID, StateBackend stateBackend, Configuration config, int globalMaxParallelism, Path savepointPath) { int localMaxParallelism = getMaxParallelism(globalMaxParallelism); return writeOperatorSubtaskStates( operatorID, stateBackend, config, savepointPath, localMaxParallelism) .transform( "reduce(OperatorSubtaskState)", TypeInformation.of(OperatorState.class), new GroupReduceOperator<>( new OperatorSubtaskStateReducer(operatorID, localMaxParallelism))) .forceNonParallel(); }
3.68
zxing_QRCodeDecoderMetaData_isMirrored
/** * @return true if the QR Code was mirrored. */ public boolean isMirrored() { return mirrored; }
3.68
hibernate-validator_PropertyMetaData_getCascadables
/** * Returns the cascadables of this property, if any. Often, there will be just a single element returned. Several * elements may be returned in the following cases: * <ul> * <li>a property's field has been marked with {@code @Valid} but type-level constraints have been given on the * getter</li> * <li>one type parameter of a property has been marked with {@code @Valid} on the field (e.g. a map's key) but * another type parameter has been marked with {@code @Valid} on the property (e.g. the map's value)</li> * <li>a (shadowed) private field in a super-type and another field of the same name in a sub-type are both marked * with {@code @Valid}</li> * </ul> */ public Set<Cascadable> getCascadables() { return cascadables; }
3.68
pulsar_LoadSimulationController_writeProducerOptions
// Write options that are common to modifying and creating topics. private void writeProducerOptions(final DataOutputStream outputStream, final ShellArguments arguments, final String topic) throws Exception { if (!arguments.rangeString.isEmpty()) { // If --rand-rate was specified, extract the bounds by splitting on // the comma and parsing the resulting // doubles. final String[] splits = arguments.rangeString.split(","); if (splits.length != 2) { log.error("Argument to --rand-rate should be two comma-separated values"); return; } final double first = Double.parseDouble(splits[0]); final double second = Double.parseDouble(splits[1]); final double min = Math.min(first, second); final double max = Math.max(first, second); arguments.rate = random.nextDouble() * (max - min) + min; } outputStream.writeUTF(topic); outputStream.writeInt(arguments.size); outputStream.writeDouble(arguments.rate); }
3.68
flink_CliClient_executeInitialization
/** Initialize the Cli Client with the content. */ public boolean executeInitialization(String content) { try { OutputStream outputStream = new ByteArrayOutputStream(256); terminal = TerminalUtils.createDumbTerminal(outputStream); boolean success = executeFile(content, outputStream, ExecutionMode.INITIALIZATION); LOG.info(outputStream.toString()); return success; } finally { closeTerminal(); } }
3.68
hadoop_StageConfig_withDeleteTargetPaths
/** * Set builder value. * @param value new value * @return the builder */ public StageConfig withDeleteTargetPaths(boolean value) { checkOpen(); deleteTargetPaths = value; return this; }
3.68
zilla_HttpClientFactory_collectHeaders
// Collect headers into map to resolve target // TODO avoid this private void collectHeaders( DirectBuffer name, DirectBuffer value) { if (!error()) { String nameStr = name.getStringWithoutLengthUtf8(0, name.capacity()); String valueStr = value.getStringWithoutLengthUtf8(0, value.capacity()); // TODO cookie needs to be appended with ';' headers.merge(nameStr, valueStr, (o, n) -> String.format("%s, %s", o, n)); } }
3.68
hadoop_AbfsClient_checkAccess
/** * Talks to the server to check whether the permission specified in * the rwx parameter is present for the path specified in the path parameter. * * @param path Path for which access check needs to be performed * @param rwx The permission to be checked on the path * @param tracingContext Tracks identifiers for request header * @return The {@link AbfsRestOperation} object for the operation * @throws AzureBlobFileSystemException in case of bad requests */ public AbfsRestOperation checkAccess(String path, String rwx, TracingContext tracingContext) throws AzureBlobFileSystemException { AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, CHECK_ACCESS); abfsUriQueryBuilder.addQuery(QUERY_FS_ACTION, rwx); appendSASTokenToQuery(path, SASTokenProvider.CHECK_ACCESS_OPERATION, abfsUriQueryBuilder); URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); AbfsRestOperation op = getAbfsRestOperation( AbfsRestOperationType.CheckAccess, AbfsHttpConstants.HTTP_METHOD_HEAD, url, createDefaultHeaders()); op.execute(tracingContext); return op; }
3.68
hmily_HmilyFeignConfiguration_hmilyFeignInterceptor
/** * Hmily rest template interceptor request interceptor. * * @return the request interceptor */ @Bean @Qualifier("hmilyFeignInterceptor") public RequestInterceptor hmilyFeignInterceptor() { return new HmilyFeignInterceptor(); }
3.68
flink_ProcessingTimeSessionWindows_mergeWindows
/** Merge overlapping {@link TimeWindow}s. */ @Override public void mergeWindows(Collection<TimeWindow> windows, MergeCallback<TimeWindow> c) { TimeWindow.mergeWindows(windows, c); }
3.68
hadoop_SendRequestIntercept_eventOccurred
/** * Handler which processes the sending request event from Azure SDK. The * handler simply resets the conditional header to make all read requests * unconditional if reads with concurrent OOB writes are allowed. * * @param sendEvent * - send event context from Windows Azure SDK. */ @Override public void eventOccurred(SendingRequestEvent sendEvent) { if (!(sendEvent.getConnectionObject() instanceof HttpURLConnection)) { // Pass if there is no HTTP connection associated with this send // request. return; } // Capture the HTTP URL connection object and get size of the payload for // the request. HttpURLConnection urlConnection = (HttpURLConnection) sendEvent .getConnectionObject(); // Determine whether this is a download request by checking that the request // method // is a "GET" operation. if (urlConnection.getRequestMethod().equalsIgnoreCase("GET")) { // If concurrent reads on OOB writes are allowed, reset the if-match // condition on the conditional header. urlConnection.setRequestProperty(HeaderConstants.IF_MATCH, ALLOW_ALL_REQUEST_PRECONDITIONS); } }
3.68
hudi_HoodieTableMetadataUtil_mapRecordKeyToFileGroupIndex
/** * Map a record key to a file group in the partition of interest. * <p> * Note: For hashing, the algorithm is the same as String.hashCode() but is being defined here as hashCode() * implementation is not guaranteed by the JVM to be consistent across JVM versions and implementations. * * @param recordKey record key for which the file group index is looked up. * @return An integer hash of the given string */ public static int mapRecordKeyToFileGroupIndex(String recordKey, int numFileGroups) { int h = 0; for (int i = 0; i < recordKey.length(); ++i) { h = 31 * h + recordKey.charAt(i); } return Math.abs(Math.abs(h) % numFileGroups); }
3.68
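A worked sketch of the pinned hash, with an illustrative record key and file group count:

// The loop reproduces String.hashCode()'s recurrence, pinned in source so the
// mapping stays stable across JVM versions.
String recordKey = "key1";   // illustrative
int numFileGroups = 10;      // illustrative
int h = 0;
for (int i = 0; i < recordKey.length(); ++i) {
    h = 31 * h + recordKey.charAt(i);
}
// h == 3288498, so the key maps to file group abs(abs(3288498) % 10) == 8
int index = Math.abs(Math.abs(h) % numFileGroups);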
hbase_QuotaUtil_addTableQuota
/* * ========================================================================= Quota "settings" * helpers */ public static void addTableQuota(final Connection connection, final TableName table, final Quotas data) throws IOException { addQuotas(connection, getTableRowKey(table), data); }
3.68
flink_BlobKey_readFromInputStream
/** * Auxiliary method to read a BLOB key from an input stream. * * @param inputStream the input stream to read the BLOB key from * @return the read BLOB key * @throws IOException throw if an I/O error occurs while reading from the input stream */ static BlobKey readFromInputStream(InputStream inputStream) throws IOException { final byte[] key = new byte[BlobKey.SIZE]; final byte[] random = new byte[AbstractID.SIZE]; int bytesRead = 0; // read key while (bytesRead < key.length) { final int read = inputStream.read(key, bytesRead, key.length - bytesRead); if (read < 0) { throw new EOFException("Read an incomplete BLOB key"); } bytesRead += read; } // read BLOB type final BlobType blobType; { final int read = inputStream.read(); if (read < 0) { throw new EOFException("Read an incomplete BLOB type"); } else if (read == TRANSIENT_BLOB.ordinal()) { blobType = TRANSIENT_BLOB; } else if (read == PERMANENT_BLOB.ordinal()) { blobType = PERMANENT_BLOB; } else { throw new IOException("Invalid data received for the BLOB type: " + read); } } // read random component bytesRead = 0; while (bytesRead < AbstractID.SIZE) { final int read = inputStream.read(random, bytesRead, AbstractID.SIZE - bytesRead); if (read < 0) { throw new EOFException("Read an incomplete BLOB key"); } bytesRead += read; } return createKey(blobType, key, random); }
3.68
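A hedged write-side counterpart showing the wire layout the reader above expects; the real BlobKey has its own serializer, so this standalone form and its names are illustrative only:

import java.io.IOException;
import java.io.OutputStream;

// Layout: [BlobKey.SIZE bytes of key hash][1 byte blob-type ordinal][AbstractID.SIZE random bytes]
static void writeBlobKey(OutputStream out, byte[] key, int blobTypeOrdinal, byte[] random)
        throws IOException {
    out.write(key);             // message digest, BlobKey.SIZE bytes
    out.write(blobTypeOrdinal); // TRANSIENT_BLOB or PERMANENT_BLOB ordinal, one byte
    out.write(random);          // random component, AbstractID.SIZE bytes
}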
framework_SerializablePredicate_negate
/** * Returns a predicate that represents the logical negation of this * predicate. * * @return a predicate that represents the logical negation of this * predicate * @since 8.5 */ default SerializablePredicate<T> negate() { return t -> !test(t); }
3.68
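A tiny usage sketch for negate():

SerializablePredicate<String> isEmpty = String::isEmpty;
SerializablePredicate<String> nonEmpty = isEmpty.negate();
nonEmpty.test("hello"); // true
nonEmpty.test("");      // false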
hbase_TableDescriptorChecker_warnOrThrowExceptionForFailure
// HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled. private static void warnOrThrowExceptionForFailure(boolean logWarn, String message, Exception cause) throws IOException { if (!logWarn) { throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + " to false at conf or table descriptor if you want to bypass sanity checks", cause); } LOG.warn(message); }
3.68
hudi_MaxwellJsonKafkaSourcePostProcessor_isTargetTable
/** * Check if it is the right table we want to consume from. * * @param database database the data belong to * @param table table the data belong to */ private boolean isTargetTable(String database, String table) { if (!databaseRegex.isPresent()) { return Pattern.matches(tableRegex, table); } else { return Pattern.matches(databaseRegex.get(), database) && Pattern.matches(tableRegex, table); } }
3.68