Dataset columns:
name — string (lengths 12 to 178)
code_snippet — string (lengths 8 to 36.5k)
score — float64 (values 3.26 to 3.68)
flink_PrimitiveArrayTypeInfo_getComponentType
/** * Gets the type information of the component type. * * @return The type information of the component type. */ @PublicEvolving public TypeInformation<?> getComponentType() { return BasicTypeInfo.getInfoFor(getComponentClass()); }
3.68
morf_HumanReadableStatementProducer_addTable
/** @see org.alfasoftware.morf.upgrade.SchemaEditor#addTable(org.alfasoftware.morf.metadata.Table) **/ @Override public void addTable(Table definition) { consumer.schemaChange(HumanReadableStatementHelper.generateAddTableString(definition)); }
3.68
pulsar_BrokerMonitor_main
/** * Run a monitor from command line arguments. * * @param args Arguments for the monitor. */ public static void main(String[] args) throws Exception { final Arguments arguments = new Arguments(); final JCommander jc = new JCommander(arguments); jc.setProgramName("pulsar-perf monitor-brokers"); try { jc.parse(args); } catch (ParameterException e) { System.out.println(e.getMessage()); jc.usage(); PerfClientUtils.exit(1); } if (arguments.extensions) { final BrokerMonitor monitor = new BrokerMonitor(arguments.connectString); monitor.startBrokerLoadDataStoreMonitor(); } else { final ZooKeeper zkClient = new ZooKeeper(arguments.connectString, ZOOKEEPER_TIMEOUT_MILLIS, null); final BrokerMonitor monitor = new BrokerMonitor(zkClient); monitor.start(); } }
3.68
flink_DeclarativeSlotManager_suspend
/** Suspends the component. This clears the internal state of the slot manager. */ @Override public void suspend() { if (!started) { return; } LOG.info("Suspending the slot manager."); slotManagerMetricGroup.close(); resourceTracker.clear(); if (taskExecutorManager != null) { taskExecutorManager.close(); for (InstanceID registeredTaskManager : taskExecutorManager.getTaskExecutors()) { unregisterTaskManager( registeredTaskManager, new SlotManagerException("The slot manager is being suspended.")); } } taskExecutorManager = null; resourceManagerId = null; resourceEventListener = null; blockedTaskManagerChecker = null; started = false; }
3.68
hudi_HoodieTable_getIndex
/** * Return the index. */ public HoodieIndex<?, ?> getIndex() { return index; }
3.68
hadoop_AbstractClientRequestInterceptor_setNextInterceptor
/** * Sets the {@link ClientRequestInterceptor} in the chain. */ @Override public void setNextInterceptor(ClientRequestInterceptor nextInterceptor) { this.nextInterceptor = nextInterceptor; } /** * Sets the {@link Configuration}
3.68
flink_DeltaIterationBase_getBroadcastInputs
/** * DeltaIteration meta operator cannot have broadcast inputs. * * @return An empty map. */ public Map<String, Operator<?>> getBroadcastInputs() { return Collections.emptyMap(); }
3.68
hadoop_WritableName_addName
/** * Add an alternate name for a class. * @param writableClass input writableClass. * @param name input name. */ public static synchronized void addName(Class<?> writableClass, String name) { NAME_TO_CLASS.put(name, writableClass); }
3.68
hadoop_IOStatisticsLogging_sortedMap
/** * Create a sorted (tree) map from an unsorted map. * This incurs the cost of creating a map and that * of inserting every object into the tree. * @param source source map * @param <E> value type * @return a treemap with all the entries. */ private static <E> Map<String, E> sortedMap( final Map<String, E> source, final Predicate<E> isEmpty) { Map<String, E> tm = new TreeMap<>(); for (Map.Entry<String, E> entry : source.entrySet()) { if (!isEmpty.test(entry.getValue())) { tm.put(entry.getKey(), entry.getValue()); } } return tm; }
3.68
AreaShop_GithubUpdateCheck_checkUpdate
/** * Check if an update is available. * @param callback Callback to execute when the update check is done * @return GithubUpdateCheck containing the status of the check */ public GithubUpdateCheck checkUpdate(UpdateCallback callback) { checking = true; final GithubUpdateCheck self = this; // Check for update on async thread new BukkitRunnable() { @Override public void run() { try { try { String rawUrl = API_HOST + "/" + author + "/" + repository + "/" + API_LATEST_RELEASE; url = new URL(rawUrl); } catch(MalformedURLException e) { logger.severe("Invalid url: '" + url + "', are the author '" + author + "' and repository '" + repository + "' correct?"); error = true; return; } try { URLConnection conn = url.openConnection(); // Give up after 15 seconds conn.setConnectTimeout(15000); // Identify ourselves conn.addRequestProperty("User-Agent", USER_AGENT); // Make sure we access the correct api version conn.addRequestProperty("Accept", "application/vnd.github.v3+json"); // We want to read the result conn.setDoOutput(true); // Open connection try(BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()))) { String response = reader.readLine(); debug("Response:", response); JSONObject latestRelease = (JSONObject)JSONValue.parse(response); if(latestRelease.isEmpty()) { logger.warning("Failed to get api response from " + url); error = true; return; } debug("json: " + latestRelease.toJSONString()); // Latest version latestVersion = (String)latestRelease.get("tag_name"); debug("Tag name:", latestVersion); // Current version debug("Plugin version:", currentVersion); // Compare version hasUpdate = versionComparator.isNewer(latestVersion, currentVersion); } } catch(IOException e) { logger.severe("Failed to get latest release: " + ExceptionUtils.getStackTrace(e)); error = true; } catch(ClassCastException e) { logger.info("Unexpected structure of the result, failed to parse it"); error = true; } } finally { checking = false; debug("result:", self); if(callback != null) { // Switch back to main thread and call the callback new BukkitRunnable() { @Override public void run() { callback.run(self); } }.runTask(plugin); } } } }.runTaskAsynchronously(plugin); return this; }
3.68
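The snippet above uses the standard Bukkit threading idiom: run blocking network I/O on an async task, then hop back to the main server thread before invoking the callback. A minimal standalone sketch of that idiom (Plugin, fetchLatestTag and UpdateCallback here are stand-ins, not the AreaShop API):

import org.bukkit.plugin.Plugin;
import org.bukkit.scheduler.BukkitRunnable;

final class AsyncThenSyncSketch {
    interface UpdateCallback { void done(String latestTag); }

    static void checkAsync(Plugin plugin, UpdateCallback callback) {
        new BukkitRunnable() {
            @Override
            public void run() {
                // blocking network call, safe off the main thread (stand-in)
                String latestTag = fetchLatestTag();
                // switch back to the main thread before touching game state
                new BukkitRunnable() {
                    @Override
                    public void run() { callback.done(latestTag); }
                }.runTask(plugin);
            }
        }.runTaskAsynchronously(plugin);
    }

    private static String fetchLatestTag() { return "v1.0.0"; } // placeholder
}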
hadoop_AMRMClientAsyncImpl_removeContainerRequest
/** * Remove previous container request. The previous container request may have * already been sent to the ResourceManager, so even after this remove request * the app must be prepared to receive an allocation for the previous request. * @param req Resource request */ public void removeContainerRequest(T req) { client.removeContainerRequest(req); }
3.68
hbase_BalanceRequest_isIgnoreRegionsInTransition
/** * Returns true if the balancer should execute even if regions are in transition, otherwise false. * This is an advanced usage feature, as it can cause more issues than it fixes. */ public boolean isIgnoreRegionsInTransition() { return ignoreRegionsInTransition; }
3.68
flink_BiFunctionWithException_unchecked
/** * Converts a {@link BiFunctionWithException} into a {@link BiFunction}. * * @param biFunctionWithException function with exception to convert into a function * @param <A> first input type * @param <B> second input type * @param <C> output type * @return {@link BiFunction} which throws all checked exceptions as unchecked exceptions. */ static <A, B, C> BiFunction<A, B, C> unchecked( BiFunctionWithException<A, B, C, ?> biFunctionWithException) { return (A a, B b) -> { try { return biFunctionWithException.apply(a, b); } catch (Throwable t) { ExceptionUtils.rethrow(t); // we need this to appease the compiler :-( return null; } }; }
3.68
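A brief usage sketch for the adapter above: a lambda that declares a checked exception can stand in wherever a plain java.util.function.BiFunction is expected.

// A throwing lambda adapted to a plain BiFunction; checked exceptions
// surface as unchecked via ExceptionUtils.rethrow.
BiFunctionWithException<String, String, Integer, Exception> parseSum =
        (a, b) -> Integer.parseInt(a) + Integer.parseInt(b);
java.util.function.BiFunction<String, String, Integer> plain =
        BiFunctionWithException.unchecked(parseSum);
int five = plain.apply("2", "3"); // 5; a NumberFormatException would propagate unchecked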
pulsar_WindowManager_scanEvents
/** * Scan events in the queue, using the expiration policy to check * if the event should be evicted or not. * * @param fullScan if set, will scan the entire queue; if not set, will stop * as soon as an event not satisfying the expiration policy is found * @return the list of events to be processed as a part of the current window */ private List<Event<T>> scanEvents(boolean fullScan) { log.debug("Scan events, eviction policy {}", evictionPolicy); List<Event<T>> eventsToExpire = new ArrayList<>(); List<Event<T>> eventsToProcess = new ArrayList<>(); lock.lock(); try { Iterator<Event<T>> it = queue.iterator(); while (it.hasNext()) { Event<T> windowEvent = it.next(); EvictionPolicy.Action action = evictionPolicy.evict(windowEvent); if (action == EXPIRE) { eventsToExpire.add(windowEvent); it.remove(); } else if (!fullScan || action == STOP) { break; } else if (action == PROCESS) { eventsToProcess.add(windowEvent); } } expiredEvents.addAll(eventsToExpire); } finally { lock.unlock(); } eventsSinceLastExpiry.set(0); if (log.isDebugEnabled()) { log.debug("[{}] events expired from window.", eventsToExpire.size()); } if (!eventsToExpire.isEmpty()) { log.debug("invoking windowLifecycleListener.onExpiry"); windowLifecycleListener.onExpiry(eventsToExpire); } return eventsToProcess; }
3.68
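The scan above hinges on three eviction actions: EXPIRE removes the event, PROCESS keeps it for the current window, and STOP ends a partial scan early. A simplified standalone version of that dispatch loop (the event and policy types are stand-ins for the Pulsar ones):

enum Action { EXPIRE, PROCESS, STOP }

static <T> java.util.List<T> scan(java.util.Deque<T> queue,
                                  java.util.function.Function<T, Action> policy,
                                  boolean fullScan) {
    java.util.List<T> toProcess = new java.util.ArrayList<>();
    java.util.Iterator<T> it = queue.iterator();
    while (it.hasNext()) {
        T event = it.next();
        Action action = policy.apply(event);
        if (action == Action.EXPIRE) {
            it.remove();                  // evict the expired event
        } else if (!fullScan || action == Action.STOP) {
            break;                        // stop at the first non-expiring event
        } else {
            toProcess.add(event);         // PROCESS: part of the current window
        }
    }
    return toProcess;
}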
graphhopper_LandmarkStorage_getFromWeight
/** * @return the weight from the landmark to the specified node. Where the landmark integer is not * a node ID but the internal index of the landmark array. */ int getFromWeight(int landmarkIndex, int node) { int res = (int) landmarkWeightDA.getShort((long) node * LM_ROW_LENGTH + landmarkIndex * 4L + FROM_OFFSET) & 0x0000FFFF; if (res == SHORT_INFINITY) // TODO can happen if endstanding oneway // we should set a 'from' value to SHORT_MAX if the 'to' value was already set to find real bugs // and what to return? Integer.MAX_VALUE i.e. convert to Double.pos_infinity upstream? return SHORT_MAX; // throw new IllegalStateException("Do not call getFromWeight for wrong landmark[" + landmarkIndex + "]=" + landmarkIDs[landmarkIndex] + " and node " + node); // TODO if(res == MAX) fallback to beeline approximation!? return res; }
3.68
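The & 0x0000FFFF mask above is the usual Java idiom for reading a stored short back as an unsigned 16-bit value, with one bit pattern reserved as an infinity sentinel. A self-contained illustration (the sentinel value and the mapping to Integer.MAX_VALUE are assumptions for the sketch, not the GraphHopper constants):

// Reads a short back as an unsigned 16-bit value; 0xFFFF is an assumed
// "infinity" sentinel, mirroring the role of SHORT_INFINITY above.
static final int INFINITY_SENTINEL = 0xFFFF;

static int unsignedWeight(short stored) {
    int res = stored & 0x0000FFFF;   // widen to 0..65535 instead of -32768..32767
    return res == INFINITY_SENTINEL ? Integer.MAX_VALUE : res;
}
// unsignedWeight((short) -1)   -> Integer.MAX_VALUE (sentinel bit pattern)
// unsignedWeight((short) -2)   -> 65534
// unsignedWeight((short) 1000) -> 1000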
framework_TooltipDelay_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Tooltips should appear with a five second delay."; }
3.68
hbase_HMaster_getMobCompactionState
/** * Gets the mob file compaction state for a specific table. Whether all the mob files are selected * is only known during compaction execution, but the statistic is gathered just before compaction * starts, when the compaction type is hard to know, so rough statistics are used for the mob file * compaction. Only two compaction states are available, CompactionState.MAJOR_AND_MINOR and * CompactionState.NONE. * @param tableName The current table name. * @return The mob compaction state of the given table. */ public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) { AtomicInteger compactionsCount = mobCompactionStates.get(tableName); if (compactionsCount != null && compactionsCount.get() != 0) { return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR; } return GetRegionInfoResponse.CompactionState.NONE; }
3.68
hadoop_AzureADAuthenticator_getTokenUsingClientCreds
/** * Gets an Azure Active Directory token using the user ID and password of * a service principal (that is, Web App in Azure Active Directory). * * Azure Active Directory allows users to set up a web app as a * service principal. Users can optionally obtain service principal keys * from AAD. This method gets a token using a service principal's client ID * and keys. In addition, it needs the token endpoint associated with the * user's directory. * * @param authEndpoint the OAuth 2.0 token endpoint associated * with the user's directory (obtain from * Active Directory configuration) * @param clientId the client ID (GUID) of the client web app * obtained from Azure Active Directory configuration * @param clientSecret the secret key of the client web app * @return {@link AzureADToken} obtained using the creds * @throws IOException if there is a failure in connecting to Azure AD */ public static AzureADToken getTokenUsingClientCreds(String authEndpoint, String clientId, String clientSecret) throws IOException { Preconditions.checkNotNull(authEndpoint, "authEndpoint"); Preconditions.checkNotNull(clientId, "clientId"); Preconditions.checkNotNull(clientSecret, "clientSecret"); boolean isVersion2AuthenticationEndpoint = authEndpoint.contains("/oauth2/v2.0/"); QueryParams qp = new QueryParams(); if (isVersion2AuthenticationEndpoint) { qp.add("scope", SCOPE); } else { qp.add("resource", RESOURCE_NAME); } qp.add("grant_type", "client_credentials"); qp.add("client_id", clientId); qp.add("client_secret", clientSecret); LOG.debug("AADToken: starting to fetch token using client creds for client ID " + clientId); return getTokenCall(authEndpoint, qp.serialize(), null, null); }
3.68
hudi_KafkaConnectUtils_getWriteStatuses
/** * Unwrap the Hudi {@link WriteStatus} from the received Protobuf message. * * @param participantInfo The {@link ControlMessage.ParticipantInfo} that contains the * underlying {@link WriteStatus} sent by the participants. * @return the list of {@link WriteStatus} returned by Hudi on a write transaction. */ public static List<WriteStatus> getWriteStatuses(ControlMessage.ParticipantInfo participantInfo) { ControlMessage.ConnectWriteStatus connectWriteStatus = participantInfo.getWriteStatus(); return SerializationUtils.deserialize(connectWriteStatus.getSerializedWriteStatus().toByteArray()); }
3.68
hudi_HoodieInputFormatUtils_getFilteredCommitsTimeline
/** * Extract HoodieTimeline based on HoodieTableMetaClient. * * @param job the job context * @param tableMetaClient the Hoodie table meta client * @return the filtered commits timeline */ public static Option<HoodieTimeline> getFilteredCommitsTimeline(JobContext job, HoodieTableMetaClient tableMetaClient) { String tableName = tableMetaClient.getTableConfig().getTableName(); HoodieDefaultTimeline baseTimeline; if (HoodieHiveUtils.stopAtCompaction(job, tableName)) { baseTimeline = filterInstantsTimeline(tableMetaClient.getActiveTimeline()); } else { baseTimeline = tableMetaClient.getActiveTimeline(); } HollowCommitHandling handlingMode = HollowCommitHandling.valueOf(job.getConfiguration() .get(INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.key(), INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.defaultValue())); HoodieTimeline filteredTimeline = handleHollowCommitIfNeeded( baseTimeline.getCommitsTimeline().filterCompletedInstants(), tableMetaClient, handlingMode); return Option.of(filteredTimeline); }
3.68
hadoop_ConfigFormat_resolve
/** * Get a matching format or null. * @param type the type/suffix to match (case-insensitive) * @return the matching format, or null if none matches */ public static ConfigFormat resolve(String type) { for (ConfigFormat format: values()) { if (format.getSuffix().equals(type.toLowerCase(Locale.ENGLISH))) { return format; } } return null; }
3.68
framework_AbstractOrderedLayoutConnector_updateSlotListeners
/** * Add/remove necessary ElementResizeListeners for one slot. This should be * called after each update to the slot or its widget. */ private void updateSlotListeners(ComponentConnector child) { Slot slot = getWidget().getSlot(child.getWidget()); // Clear all possible listeners first slot.setWidgetResizeListener(null); if (slot.hasCaption()) { slot.setCaptionResizeListener(null); } if (slot.hasSpacing()) { slot.setSpacingResizeListener(null); } // Add all necessary listeners boolean listenersAdded = false; if (needsFixedHeight()) { slot.setWidgetResizeListener(childComponentResizeListener); if (slot.hasCaption()) { slot.setCaptionResizeListener(slotCaptionResizeListener); } listenersAdded = true; } else if ((hasChildrenWithRelativeHeight || hasChildrenWithRelativeWidth) && slot.hasCaption()) { /* * If the slot has caption, we need to listen for its size changes * in order to update the padding/margin offset for relative sized * components. * * TODO might only be needed if the caption is in the same direction * as the relative size? */ slot.setCaptionResizeListener(slotCaptionResizeListener); listenersAdded = true; } if (needsExpand()) { // TODO widget resize only be needed for children without expand? slot.setWidgetResizeListener(childComponentResizeListener); if (slot.hasSpacing()) { slot.setSpacingResizeListener(spacingResizeListener); } listenersAdded = true; } if (listenersAdded) { // removing these listeners makes widget unmeasurable and resets the // measured height, measure again if listeners got added back getLayoutManager().setNeedsMeasure(child); } }
3.68
hbase_RequestConverter_buildAssignRegionRequest
/** * Create a protocol buffer AssignRegionRequest * @return an AssignRegionRequest */ public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) { AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); }
3.68
querydsl_JTSGeometryExpression_geometryType
/** * Returns the name of the instantiable subtype of Geometry of which this * geometric object is an instantiable member. The name of the subtype of Geometry is returned as a string. * * @return geometry type */ public StringExpression geometryType() { if (geometryType == null) { geometryType = Expressions.stringOperation(SpatialOps.GEOMETRY_TYPE, mixin); } return geometryType; }
3.68
shardingsphere-elasticjob_JobFacade_beforeJobExecuted
/** * Call before job executed. * * @param shardingContexts sharding contexts */ public void beforeJobExecuted(final ShardingContexts shardingContexts) { for (ElasticJobListener each : elasticJobListeners) { each.beforeJobExecuted(shardingContexts); } }
3.68
hbase_HFileCleaner_startHFileDeleteThreads
/** * Start threads for hfile deletion */ private void startHFileDeleteThreads() { final String n = Thread.currentThread().getName(); running = true; // start thread for large file deletion for (int i = 0; i < largeFileDeleteThreadNumber; i++) { Thread large = new Thread() { @Override public void run() { consumerLoop(largeFileQueue); } }; large.setDaemon(true); large.setName(n + "-HFileCleaner.large." + i + "-" + EnvironmentEdgeManager.currentTime()); large.start(); LOG.debug("Starting for large file={}", large); threads.add(large); } // start thread for small file deletion for (int i = 0; i < smallFileDeleteThreadNumber; i++) { Thread small = new Thread() { @Override public void run() { consumerLoop(smallFileQueue); } }; small.setDaemon(true); small.setName(n + "-HFileCleaner.small." + i + "-" + EnvironmentEdgeManager.currentTime()); small.start(); LOG.debug("Starting for small files={}", small); threads.add(small); } }
3.68
flink_KubernetesSessionCli_repStep
/** * Check whether to continue reading input or to kill the cluster. * * @param in input buffer reader * @return f0: whether to continue reading from input; f1: whether to kill the cluster. */ private Tuple2<Boolean, Boolean> repStep(BufferedReader in) throws IOException, InterruptedException { final long startTime = System.currentTimeMillis(); while ((System.currentTimeMillis() - startTime) < CLIENT_POLLING_INTERVAL_MS && (!in.ready())) { Thread.sleep(200L); } // ------------- handle interactive command by user. ---------------------- if (in.ready()) { final String command = in.readLine(); switch (command) { case "quit": return new Tuple2<>(false, false); case "stop": return new Tuple2<>(false, true); case "help": System.err.println(KUBERNETES_CLUSTER_HELP); break; default: System.err.println("Unknown command '" + command + "'. Showing help:"); System.err.println(KUBERNETES_CLUSTER_HELP); break; } } return new Tuple2<>(true, false); }
3.68
hbase_SnapshotManager_resetTempDir
/** * Cleans up any zk-coordinated snapshots in the snapshot/.tmp directory that were left from * failed snapshot attempts. For unfinished procedure2-coordinated snapshots, keep the working * directory. * @throws IOException if we can't reach the filesystem */ private void resetTempDir() throws IOException { Set<String> workingProcedureCoordinatedSnapshotNames = snapshotToProcIdMap.keySet().stream().map(s -> s.getName()).collect(Collectors.toSet()); Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, master.getConfiguration()); FileSystem tmpFs = tmpdir.getFileSystem(master.getConfiguration()); FileStatus[] workingSnapshotDirs = CommonFSUtils.listStatus(tmpFs, tmpdir); if (workingSnapshotDirs == null) { return; } for (FileStatus workingSnapshotDir : workingSnapshotDirs) { String workingSnapshotName = workingSnapshotDir.getPath().getName(); if (!workingProcedureCoordinatedSnapshotNames.contains(workingSnapshotName)) { try { if (tmpFs.delete(workingSnapshotDir.getPath(), true)) { LOG.info("delete unfinished zk-coordinated snapshot working directory {}", workingSnapshotDir.getPath()); } else { LOG.warn("Couldn't delete unfinished zk-coordinated snapshot working directory {}", workingSnapshotDir.getPath()); } } catch (IOException e) { LOG.warn("Couldn't delete unfinished zk-coordinated snapshot working directory {}", workingSnapshotDir.getPath(), e); } } else { LOG.debug("find working directory of unfinished procedure {}", workingSnapshotName); } } }
3.68
flink_JobMaster_updateTaskExecutionState
/** * Updates the task execution state for a given task. * * @param taskExecutionState New task execution state for a given task * @return Acknowledge the task execution state update */ @Override public CompletableFuture<Acknowledge> updateTaskExecutionState( final TaskExecutionState taskExecutionState) { FlinkException taskExecutionException; try { checkNotNull(taskExecutionState, "taskExecutionState"); if (schedulerNG.updateTaskExecutionState(taskExecutionState)) { return CompletableFuture.completedFuture(Acknowledge.get()); } else { taskExecutionException = new ExecutionGraphException( "The execution attempt " + taskExecutionState.getID() + " was not found."); } } catch (Exception e) { taskExecutionException = new JobMasterException( "Could not update the state of task execution for JobMaster.", e); handleJobMasterError(taskExecutionException); } return FutureUtils.completedExceptionally(taskExecutionException); }
3.68
shardingsphere-elasticjob_ElasticJobSnapshotServiceConfiguration_snapshotService
/** * Create a Snapshot service bean and start listening. * * @param registryCenter registry center * @param snapshotServiceProperties snapshot service properties * @return a bean of snapshot service */ @ConditionalOnProperty(name = "elasticjob.dump.port") @Bean(initMethod = "listen", destroyMethod = "close") public SnapshotService snapshotService(final CoordinatorRegistryCenter registryCenter, final SnapshotServiceProperties snapshotServiceProperties) { return new SnapshotService(registryCenter, snapshotServiceProperties.getPort()); }
3.68
framework_AbstractComponent_setData
/** * Sets the data object that can be used for any application specific data. * The component does not use or modify this data. * * @param data * the application specific data. * @since 3.1 */ public void setData(Object data) { applicationData = data; }
3.68
framework_VUpload_updateEnabledForSubmitButton
/** * Updates the enabled status for submit button. If the widget itself is * disabled, so is the submit button. It must also follow overall enabled * status in immediate mode, otherwise you cannot select a file at all. In * non-immediate mode there is another button for selecting the file, so the * submit button should be disabled until a file has been selected, unless * upload without selection has been specifically allowed. */ private void updateEnabledForSubmitButton() { if (enabled && (isImmediateMode() || hasFilename() || allowUploadWithoutFilename)) { submitButton.setEnabled(true); submitButton.setStyleName(StyleConstants.DISABLED, false); } else { submitButton.setEnabled(false); submitButton.setStyleName(StyleConstants.DISABLED, true); } }
3.68
hbase_MetricsStochasticBalancer_updateMetricsSize
/** * Updates the number of metrics reported to JMX */ public void updateMetricsSize(int size) { stochasticSource.updateMetricsSize(size); }
3.68
flink_LinkElement_link
/** * Creates a link with a given url. This url will be used as a description for that link. * * @param link address that this link should point to * @return link representation */ public static LinkElement link(String link) { return new LinkElement(link, link); }
3.68
streampipes_CalculateDurationProcessor_declareModel
//TODO: Change Icon @Override public DataProcessorDescription declareModel() { return ProcessingElementBuilder.create("org.apache.streampipes.processors.transformation.jvm.duration-value") .category(DataProcessorType.TIME) .withLocales(Locales.EN) .withAssets(Assets.DOCUMENTATION, Assets.ICON) .requiredStream(StreamRequirementsBuilder.create() .requiredPropertyWithUnaryMapping(EpRequirements.timestampReq(), Labels.withId(START_TS_FIELD_ID), PropertyScope.NONE) .requiredPropertyWithUnaryMapping(EpRequirements.timestampReq(), Labels.withId(END_TS_FIELD_ID), PropertyScope.NONE) .build()) .requiredSingleValueSelection(Labels.withId(UNIT_FIELD_ID), Options.from(MS, SECONDS, MINUTES, HOURS)) .outputStrategy(OutputStrategies.append(EpProperties.doubleEp(Labels.empty(), DURATION_FIELD_NAME, SO.NUMBER))) .build(); }
3.68
framework_VComboBox_selectPrevItem
/** * Selects the previous item in the filtered selections. */ public void selectPrevItem() { debug("VComboBox.SP: selectPrevItem()"); final int index = menu.getSelectedIndex() - 1; if (index > -1) { selectItem(menu.getItems().get(index)); } else if (index == -1) { selectPrevPage(); } else { if (!menu.getItems().isEmpty()) { selectLastItem(); } } }
3.68
hadoop_ResourceRequest_isAnyLocation
/** * Check whether the given <em>host/rack</em> string represents an arbitrary * host name. * * @param hostName <em>host/rack</em> on which the allocation is desired * @return whether the given <em>host/rack</em> string represents an arbitrary * host name */ @Public @Stable public static boolean isAnyLocation(String hostName) { return ANY.equals(hostName); }
3.68
framework_Window_setCloseShortcut
/** * This is the old way of adding a keyboard shortcut to close a * {@link Window} - to preserve compatibility with existing code under the * new functionality, this method now first removes all registered close * shortcuts, then adds the default ESCAPE shortcut key, and then attempts * to add the shortcut provided as parameters to this method. This method, * and its companion {@link #removeCloseShortcut()}, are now considered * deprecated, as their main function is to preserve exact backwards * compatibility with old code. For all new code, use the new keyboard * shortcuts API: {@link #addCloseShortcut(int,int...)}, * {@link #removeCloseShortcut(int,int...)}, * {@link #removeAllCloseShortcuts()}, {@link #hasCloseShortcut(int,int...)} * and {@link #getCloseShortcuts()}. * <p> * Original description: Makes it possible to close the window by pressing * the given {@link KeyCode} and (optional) {@link ModifierKey}s.<br/> * Note that this shortcut only reacts while the window has focus, closing * itself - if you want to close a window from a UI, use * {@link UI#addAction(com.vaadin.event.Action)} of the UI instead. * * @param keyCode * the keycode for invoking the shortcut * @param modifiers * the (optional) modifiers for invoking the shortcut. Can be set * to null to be explicit about not having modifiers. * * @deprecated Use {@link #addCloseShortcut(int, int...)} instead. */ @Deprecated public void setCloseShortcut(int keyCode, int... modifiers) { removeCloseShortcut(); addCloseShortcut(keyCode, modifiers); }
3.68
morf_ObjectTreeTraverser_dispatch
/** * Invokes the callback on the object. If the object implements Driver then * its drive method is invoked, propagating this traverser. Iterables and * arrays will have the callback invoked on their elements. * * @param object the object node in the object graph. * @return this, for method chaining. * @param <T> the type of object to traverse */ public <T> ObjectTreeTraverser dispatch(T object) { if (object == null) { return this; } if (object instanceof Iterable<?>) { for (Object element : (Iterable<?>) object) { dispatch(element); } return this; } if (object instanceof Object[]) { for (Object element : (Object[]) object) { dispatch(element); } return this; } callback.visit(object); if (object instanceof Driver) { ((Driver) object).drive(this); } return this; }
3.68
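For illustration, a node type participating in this traversal implements Driver and hands its children back to the traverser, which recurses and visits them; SelectNode and its fields below are invented for the example, not Morf API:

class SelectNode implements Driver {
    private final Object whereClause;            // may itself be another node
    private final java.util.List<Object> fields; // dispatched element-wise

    SelectNode(Object whereClause, java.util.List<Object> fields) {
        this.whereClause = whereClause;
        this.fields = fields;
    }

    @Override
    public void drive(ObjectTreeTraverser traverser) {
        // hand each child back; the traverser visits it and recurses further
        traverser.dispatch(whereClause);
        traverser.dispatch(fields);
    }
}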
hudi_HoodieUnMergedLogRecordScanner_scan
/** * Scans delta-log files, processing blocks. */ public final void scan() { scan(false); }
3.68
flink_ResourceInformationReflector_setResourceInformationUnSafe
/** * Same as {@link #setResourceInformation(Resource, String, long)} but allows to pass objects * that are not of type {@link Resource}. */ @VisibleForTesting void setResourceInformationUnSafe(Object resource, String resourceName, long amount) { if (!isYarnResourceTypesAvailable) { LOG.info( "Will not request extended resource {} because the used YARN version does not support it.", resourceName); return; } try { resourceSetResourceInformationMethod.invoke( resource, resourceName, resourceInformationNewInstanceMethod.invoke(null, resourceName, amount)); } catch (Exception e) { LOG.warn( "Error in setting the external resource {}. Will not request this resource from YARN.", resourceName, e); } }
3.68
framework_AbstractMedia_setMuted
/** * Sets whether to mute the audio or not. * * @param muted true to mute the audio, false to unmute */ public void setMuted(boolean muted) { getState().muted = muted; }
3.68
hadoop_CloseableReferenceCount_setClosed
/** * Mark the status as closed. * * Once the status is closed, it cannot be reopened. * * @return The current reference count. * @throws ClosedChannelException If someone else closes the object * before we do. */ public int setClosed() throws ClosedChannelException { while (true) { int curBits = status.get(); if ((curBits & STATUS_CLOSED_MASK) != 0) { throw new ClosedChannelException(); } if (status.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) { return curBits & (~STATUS_CLOSED_MASK); } } }
3.68
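The loop above is a classic lock-free compare-and-set retry: read the packed status word, fail if the closed bit is already set, otherwise try to set it atomically and retry on contention. A minimal standalone version of the same pattern (the bit layout is an assumption for the sketch):

import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;

final class ClosableBitSketch {
    private static final int CLOSED_MASK = 1 << 30;   // assumed bit layout
    private final AtomicInteger status = new AtomicInteger(0);

    int setClosed() throws ClosedChannelException {
        while (true) {
            int cur = status.get();
            if ((cur & CLOSED_MASK) != 0) {
                throw new ClosedChannelException();   // someone closed it first
            }
            if (status.compareAndSet(cur, cur | CLOSED_MASK)) {
                return cur & ~CLOSED_MASK;            // the reference-count part
            }
            // CAS lost a race with another update; re-read and retry
        }
    }
}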
zilla_HpackContext_staticIndex25
// Index in static table for the given name of length 25 private static int staticIndex25(DirectBuffer name) { return (name.getByte(24) == 'y' && STATIC_TABLE[56].name.equals(name)) ? 56 : -1; // strict-transport-security }
3.68
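The single-byte probe above is a cheap pre-filter: among static-table entries with 25-byte names, one distinguishing byte rejects most non-matches before the full equality check. A standalone sketch of the idea on plain byte arrays:

// Among hypothetical 25-byte names, one byte distinguishes candidates:
// fast-reject on that byte, then confirm with a full comparison.
static int staticIndexSketch(byte[] name, byte[] tableEntryName, int tableIndex) {
    return (name[24] == tableEntryName[24] && java.util.Arrays.equals(name, tableEntryName))
            ? tableIndex : -1;
}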
hbase_ZKAuthentication_loginClient
/** * Log in the current zookeeper client using the given configuration keys for the credential file * and login principal. * <p> * <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without * security features), this will safely be ignored. * </p> * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal * @param hostname Current hostname to use in any credentials * @throws IOException underlying exception from SecurityUtil.login() call */ public static void loginClient(Configuration conf, String keytabFileKey, String userNameKey, String hostname) throws IOException { login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); }
3.68
hadoop_OBSFileSystem_getObsListing
/** * Return the OBSListing instance used by this filesystem. * * @return the OBSListing instance */ OBSListing getObsListing() { return obsListing; }
3.68
flink_ExecutionConfig_getDefaultKryoSerializers
/** Returns the registered default Kryo Serializers. */ public LinkedHashMap<Class<?>, SerializableSerializer<?>> getDefaultKryoSerializers() { return defaultKryoSerializers; }
3.68
dubbo_RegistrySpecListener_onPostOfDirectory
/** * Every time an event is triggered, several fixed directory-related keys are incremented; this is * independent of the monitored key. */ public static AbstractMetricsKeyListener onPostOfDirectory( MetricsKey metricsKey, CombMetricsCollector<?> collector) { return AbstractMetricsKeyListener.onEvent(metricsKey, event -> { Map<MetricsKey, Map<String, Integer>> summaryMap = event.getAttachmentValue(ATTACHMENT_DIRECTORY_MAP); Map<String, String> otherAttachments = new HashMap<>(); for (Map.Entry<String, Object> entry : event.getAttachments().entrySet()) { if (entry.getValue() instanceof String) { otherAttachments.put(entry.getKey().toLowerCase(Locale.ROOT), (String) entry.getValue()); } } summaryMap.forEach((summaryKey, map) -> map.forEach((k, v) -> { if (CollectionUtils.isEmptyMap(otherAttachments)) { collector.setNum(new MetricsKeyWrapper(summaryKey, OP_TYPE_DIRECTORY), k, v); } else { ((RegistryMetricsCollector) collector) .setNum(new MetricsKeyWrapper(summaryKey, OP_TYPE_DIRECTORY), k, v, otherAttachments); } })); }); }
3.68
hudi_HoodieListData_lazy
/** * Creates an instance of {@link HoodieListData} bearing *lazy* execution semantics. * * @param listData a {@link List} of objects in type T * @param <T> type of object * @return a new instance containing the {@link List<T>} reference */ public static <T> HoodieListData<T> lazy(List<T> listData) { return new HoodieListData<>(listData, true); }
3.68
rocketmq-connect_JdbcSourceConnector_taskConfigs
/** * Returns a set of configurations for Tasks based on the current configuration, * producing at most count configurations. * * @param maxTasks maximum number of configurations to generate * @return configurations for Tasks */ @Override public List<KeyValue> taskConfigs(int maxTasks) { log.info("Connector task config divide[" + maxTasks + "]"); List<KeyValue> keyValues = Lists.newArrayList(); List<String> tables = Lists.newArrayList(); log.info("Connector table white list[" + jdbcSourceConfig.getTableWhitelist() + "]"); jdbcSourceConfig.getTableWhitelist().forEach(table -> { tables.add(table); }); maxTasks = tables.size() > maxTasks ? maxTasks : tables.size(); List<List<String>> tablesGrouped = ConnectorGroupUtils.groupPartitions(tables, maxTasks); for (List<String> tableGroup : tablesGrouped) { KeyValue keyValue = new DefaultKeyValue(); for (String key : originalConfig.keySet()) { keyValue.put(key, originalConfig.getString(key)); } keyValue.put(JdbcSourceTaskConfig.TABLES_CONFIG, StringUtils.join(tableGroup, ",")); keyValues.add(keyValue); } return keyValues; }
3.68
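The interesting step above is ConnectorGroupUtils.groupPartitions, which spreads N tables over at most maxTasks groups. A plausible round-robin sketch of such a helper (an assumption for illustration, not the rocketmq-connect implementation):

static <T> java.util.List<java.util.List<T>> groupPartitions(java.util.List<T> items, int groups) {
    java.util.List<java.util.List<T>> out = new java.util.ArrayList<>();
    for (int i = 0; i < groups; i++) {
        out.add(new java.util.ArrayList<>());
    }
    for (int i = 0; i < items.size(); i++) {
        out.get(i % groups).add(items.get(i)); // round-robin assignment
    }
    return out;
}
// groupPartitions(List.of("a", "b", "c"), 2) -> [[a, c], [b]]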
flink_AbstractPythonStreamGroupAggregateOperator_onProcessingTime
/** Invoked when a processing-time timer fires. */ @Override public void onProcessingTime(InternalTimer<RowData, VoidNamespace> timer) throws Exception { if (stateCleaningEnabled) { RowData key = timer.getKey(); long timestamp = timer.getTimestamp(); reuseTimerRowData.setLong(2, timestamp); reuseTimerRowData.setField(3, key); udfInputTypeSerializer.serialize(reuseTimerRowData, baosWrapper); pythonFunctionRunner.process(baos.toByteArray()); baos.reset(); elementCount++; } }
3.68
hadoop_HsController_tasksPage
/* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasksPage() */ @Override protected Class<? extends View> tasksPage() { return HsTasksPage.class; }
3.68
hadoop_EditLogInputStream_nextValidOp
/** * Get the next valid operation from the stream storage. * * This is exactly like nextOp, except that we attempt to skip over damaged * parts of the edit log * * @return an operation from the stream or null if at end of stream */ protected FSEditLogOp nextValidOp() { // This is a trivial implementation which just assumes that any errors mean // that there is nothing more of value in the log. Subclasses that support // error recovery will want to override this. try { return nextOp(); } catch (Throwable e) { return null; } }
3.68
framework_GeneratedPropertyContainer_getWrappedContainer
/** * Returns the original underlying container. * * @return the original underlying container */ public Container.Indexed getWrappedContainer() { return wrappedContainer; }
3.68
framework_SliderElement_getValue
/** * Get the value of the slider. * * Warning! This method causes the slider popup to appear on the screen. To hide * this popup, just focus any other element on the page. */ public String getValue() { WebElement popupElem = findElement(By.vaadin("#popup")); return popupElem.getAttribute("textContent"); }
3.68
flink_MemorySegment_put
/** * Bulk put method. Copies {@code numBytes} bytes from the given {@code ByteBuffer} into this * memory segment. The bytes will be read from the source buffer starting at the buffer's * current position, and will be written to this memory segment starting at {@code offset}. If * this method attempts to read more bytes than the source byte buffer has remaining (with * respect to {@link ByteBuffer#remaining()}), this method will cause a {@link * java.nio.BufferUnderflowException}. * * @param offset The position in this memory segment where writing starts. * @param source The ByteBuffer to copy the bytes from. * @param numBytes The number of bytes to copy. * @throws IndexOutOfBoundsException If the offset is invalid, or the source buffer does not * contain the given number of bytes, or this segment does not have enough space for the * bytes (counting from offset). */ public void put(int offset, ByteBuffer source, int numBytes) { // check the byte array offset and length if ((offset | numBytes | (offset + numBytes)) < 0) { throw new IndexOutOfBoundsException(); } final int sourceOffset = source.position(); final int remaining = source.remaining(); if (remaining < numBytes) { throw new BufferUnderflowException(); } if (source.isDirect()) { // copy to the target memory directly final long sourcePointer = getByteBufferAddress(source) + sourceOffset; final long targetPointer = address + offset; if (targetPointer <= addressLimit - numBytes) { UNSAFE.copyMemory(null, sourcePointer, heapMemory, targetPointer, numBytes); source.position(sourceOffset + numBytes); } else if (address > addressLimit) { throw new IllegalStateException("segment has been freed"); } else { throw new IndexOutOfBoundsException(); } } else if (source.hasArray()) { // move directly into the byte array put(offset, source.array(), sourceOffset + source.arrayOffset(), numBytes); // this must be after the put() call to ensure that the byte buffer is not // modified in case the call fails source.position(sourceOffset + numBytes); } else { // other types of byte buffers for (int i = 0; i < numBytes; i++) { put(offset++, source.get()); } } }
3.68
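The three branches above follow the usual ByteBuffer copy strategy: raw memory copy for direct buffers, bulk array copy for heap buffers, and a byte-by-byte fallback. A JDK-only sketch of the same branching, copying into a plain byte[]:

import java.nio.ByteBuffer;

static void copyFrom(ByteBuffer source, byte[] target, int offset, int numBytes) {
    if (source.remaining() < numBytes) {
        throw new java.nio.BufferUnderflowException();
    }
    if (source.hasArray()) {
        // heap buffer with an accessible array: one bulk array copy
        System.arraycopy(source.array(), source.arrayOffset() + source.position(),
                target, offset, numBytes);
        source.position(source.position() + numBytes);
    } else {
        // direct or read-only buffer: no accessible array, fall back to get()
        for (int i = 0; i < numBytes; i++) {
            target[offset + i] = source.get();
        }
    }
}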
flink_CompositeTypeSerializerUtil_constructIntermediateCompatibilityResult
/** * Constructs an {@link IntermediateCompatibilityResult} with the given array of nested * serializers and their corresponding serializer snapshots. * * <p>This result is considered "intermediate", because the actual final result is not yet built * if it isn't defined. This is the case if the final result is supposed to be {@link * TypeSerializerSchemaCompatibility#compatibleWithReconfiguredSerializer(TypeSerializer)}, * where construction of the reconfigured serializer instance should be done by the caller. * * <p>For other cases, i.e. {@link TypeSerializerSchemaCompatibility#compatibleAsIs()}, {@link * TypeSerializerSchemaCompatibility#compatibleAfterMigration()}, and {@link * TypeSerializerSchemaCompatibility#incompatible()}, these results are considered final. * * @param newNestedSerializers the new nested serializers to check for compatibility. * @param nestedSerializerSnapshots the associated nested serializers' snapshots. * @return the intermediate compatibility result of the new nested serializers. */ public static <T> IntermediateCompatibilityResult<T> constructIntermediateCompatibilityResult( TypeSerializer<?>[] newNestedSerializers, TypeSerializerSnapshot<?>[] nestedSerializerSnapshots) { Preconditions.checkArgument( newNestedSerializers.length == nestedSerializerSnapshots.length, "Different number of new serializers and existing serializer snapshots."); TypeSerializer<?>[] nestedSerializers = new TypeSerializer[newNestedSerializers.length]; // check nested serializers for compatibility boolean nestedSerializerRequiresMigration = false; boolean hasReconfiguredNestedSerializers = false; for (int i = 0; i < nestedSerializerSnapshots.length; i++) { TypeSerializerSchemaCompatibility<?> compatibility = resolveCompatibility(newNestedSerializers[i], nestedSerializerSnapshots[i]); // if any one of the new nested serializers is incompatible, we can just short circuit // the result if (compatibility.isIncompatible()) { return IntermediateCompatibilityResult.definedIncompatibleResult(); } if (compatibility.isCompatibleAfterMigration()) { nestedSerializerRequiresMigration = true; } else if (compatibility.isCompatibleWithReconfiguredSerializer()) { hasReconfiguredNestedSerializers = true; nestedSerializers[i] = compatibility.getReconfiguredSerializer(); } else if (compatibility.isCompatibleAsIs()) { nestedSerializers[i] = newNestedSerializers[i]; } else { throw new IllegalStateException("Undefined compatibility type."); } } if (nestedSerializerRequiresMigration) { return IntermediateCompatibilityResult.definedCompatibleAfterMigrationResult(); } if (hasReconfiguredNestedSerializers) { return IntermediateCompatibilityResult.undefinedReconfigureResult(nestedSerializers); } // ends up here if everything is compatible as is return IntermediateCompatibilityResult.definedCompatibleAsIsResult(nestedSerializers); }
3.68
flink_RestfulGateway_requestJob
/** * Requests the {@link ArchivedExecutionGraph} for the given jobId. If there is no such graph, * then the future is completed with a {@link FlinkJobNotFoundException}. * * @param jobId identifying the job whose {@link ArchivedExecutionGraph} is requested * @param timeout for the asynchronous operation * @return Future containing the {@link ArchivedExecutionGraph} for the given jobId, otherwise * {@link FlinkJobNotFoundException} */ default CompletableFuture<ArchivedExecutionGraph> requestJob( JobID jobId, @RpcTimeout Time timeout) { return requestExecutionGraphInfo(jobId, timeout) .thenApply(ExecutionGraphInfo::getArchivedExecutionGraph); }
3.68
pulsar_ModularLoadManagerImpl_updateBundleSplitMetrics
/** * As leader broker, update bundle split metrics. * * @param bundlesSplit the number of bundles splits */ private void updateBundleSplitMetrics(int bundlesSplit) { bundleSplitCount += bundlesSplit; List<Metrics> metrics = new ArrayList<>(); Map<String, String> dimensions = new HashMap<>(); dimensions.put("metric", "bundlesSplit"); Metrics m = Metrics.create(dimensions); m.put("brk_lb_bundles_split_total", bundleSplitCount); metrics.add(m); this.bundleSplitMetrics.set(metrics); }
3.68
framework_NullValidator_validate
/** * Validates the data given in value. * * @param value * the value to validate. * @throws Validator.InvalidValueException * if the value was invalid. */ @Override public void validate(Object value) throws Validator.InvalidValueException { if ((onlyNullAllowed && value != null) || (!onlyNullAllowed && value == null)) { throw new Validator.InvalidValueException(errorMessage); } }
3.68
hadoop_InMemoryConfigurationStore_getCurrentVersion
/** * Configuration mutations are not logged (i.e. not persisted) by this store, and hence are not * versioned either, so a current version is not applicable. * @return null, as a current version is not applicable for this store. */ @Override public Version getCurrentVersion() { // Does nothing. return null; }
3.68
hibernate-validator_AnnotationApiHelper_getDeclaredTypeByName
/** * Returns the {@link DeclaredType} for the given class name. * * @param className A fully qualified class name, e.g. "java.lang.String". * * @return A {@link DeclaredType} representing the type with the given name, * or null, if no such type exists. */ public DeclaredType getDeclaredTypeByName(String className) { TypeElement typeElement = elementUtils.getTypeElement( className ); return typeElement != null ? typeUtils.getDeclaredType( typeElement ) : null; }
3.68
hbase_JVM_isAmd64
/** * Check if the arch is amd64. * @return whether this is amd64 or not. */ public static boolean isAmd64() { return amd64; }
3.68
framework_FieldGroup_buildAndBind
/** * Builds a field using the given caption and binds it to the given property * id using the field binder. * * @param caption * The caption for the field * @param propertyId * The property id to bind to. Must be present in the field * finder. * @throws BindException * If there is a problem while building or binding * @return The created and bound field. Can be any type of {@link Field}. */ public Field<?> buildAndBind(String caption, Object propertyId) throws BindException { return buildAndBind(caption, propertyId, Field.class); } /** * Builds a field using the given caption and binds it to the given property * id using the field binder. Ensures the new field is of the given type. * * @param caption * The caption for the field * @param propertyId * The property id to bind to. Must be present in the field * finder. * @throws BindException * If the field could not be created * @return The created and bound field. Can be any type of {@link Field}
3.68
hadoop_FSBuilder_optLong
/** * Set optional long parameter for the Builder. * * @param key key. * @param value value. * @return generic type B. * @see #opt(String, String) */ default B optLong(@Nonnull String key, long value) { return opt(key, Long.toString(value)); }
3.68
framework_GridElement_getField
/** * Gets the editor field for column in given index. * * @param colIndex * the column index * @return the editor field for given location * * @throws NoSuchElementException * if {@code isEditable(colIndex) == false} */ public TestBenchElement getField(int colIndex) { return grid.getSubPart("#editor[" + colIndex + "]"); }
3.68
morf_SqlDialect_getForUpdateSql
/** * Default behaviour for FOR UPDATE. Can be overridden. * @return The String representation of the FOR UPDATE clause. */ protected String getForUpdateSql() { return " FOR UPDATE"; }
3.68
graphhopper_QueryOverlayBuilder_buildVirtualEdges
/** * For all specified snaps calculate the snapped point and if necessary set the closest node * to a virtual one and reverse the closest edge. Additionally the wayIndex can change if an edge is * swapped. */ private void buildVirtualEdges(List<Snap> snaps) { GHIntObjectHashMap<List<Snap>> edge2res = new GHIntObjectHashMap<>(snaps.size()); // Phase 1 // calculate snapped point and swap direction of closest edge if necessary for (Snap snap : snaps) { // Do not create virtual node for a snap if it is directly on a tower node or not found if (snap.getSnappedPosition() == Snap.Position.TOWER) continue; EdgeIteratorState closestEdge = snap.getClosestEdge(); if (closestEdge == null) throw new IllegalStateException("Do not call QueryGraph.create with invalid Snap " + snap); int base = closestEdge.getBaseNode(); // Force the identical direction for all closest edges. // It is important to sort multiple results for the same edge by its wayIndex boolean doReverse = base > closestEdge.getAdjNode(); if (base == closestEdge.getAdjNode()) { // check for special case #162 where adj == base and force direction via latitude comparison PointList pl = closestEdge.fetchWayGeometry(FetchMode.PILLAR_ONLY); if (pl.size() > 1) doReverse = pl.getLat(0) > pl.getLat(pl.size() - 1); } if (doReverse) { closestEdge = closestEdge.detach(true); PointList fullPL = closestEdge.fetchWayGeometry(FetchMode.ALL); snap.setClosestEdge(closestEdge); if (snap.getSnappedPosition() == Snap.Position.PILLAR) // ON pillar node snap.setWayIndex(fullPL.size() - snap.getWayIndex() - 1); else // for case "OFF pillar node" snap.setWayIndex(fullPL.size() - snap.getWayIndex() - 2); if (snap.getWayIndex() < 0) throw new IllegalStateException("Problem with wayIndex while reversing closest edge:" + closestEdge + ", " + snap); } // find multiple results on same edge int edgeId = closestEdge.getEdge(); List<Snap> list = edge2res.get(edgeId); if (list == null) { list = new ArrayList<>(5); edge2res.put(edgeId, list); } list.add(snap); } // Phase 2 - now it is clear which points cut one edge // 1. create point lists // 2. create virtual edges between virtual nodes and its neighbor (virtual or normal nodes) edge2res.forEach(new IntObjectPredicate<List<Snap>>() { @Override public boolean apply(int edgeId, List<Snap> results) { // we can expect at least one entry in the results EdgeIteratorState closestEdge = results.get(0).getClosestEdge(); final PointList fullPL = closestEdge.fetchWayGeometry(FetchMode.ALL); int baseNode = closestEdge.getBaseNode(); Collections.sort(results, new Comparator<Snap>() { @Override public int compare(Snap o1, Snap o2) { int diff = Integer.compare(o1.getWayIndex(), o2.getWayIndex()); if (diff == 0) { return Double.compare(distanceOfSnappedPointToPillarNode(o1), distanceOfSnappedPointToPillarNode(o2)); } else { return diff; } } private double distanceOfSnappedPointToPillarNode(Snap o) { GHPoint snappedPoint = o.getSnappedPoint(); double fromLat = fullPL.getLat(o.getWayIndex()); double fromLon = fullPL.getLon(o.getWayIndex()); return DistancePlaneProjection.DIST_PLANE.calcNormalizedDist(fromLat, fromLon, snappedPoint.lat, snappedPoint.lon); } }); GHPoint3D prevPoint = fullPL.get(0); int adjNode = closestEdge.getAdjNode(); int origEdgeKey = closestEdge.getEdgeKey(); int origRevEdgeKey = closestEdge.getReverseEdgeKey(); int prevWayIndex = 1; int prevNodeId = baseNode; int virtNodeId = queryOverlay.getVirtualNodes().size() + firstVirtualNodeId; boolean addedEdges = false; // Create base and adjacent PointLists for all non-equal virtual nodes. // We do so via inserting them at the correct position of fullPL and cutting the // fullPL into the right pieces. for (int i = 0; i < results.size(); i++) { Snap res = results.get(i); if (res.getClosestEdge().getBaseNode() != baseNode) throw new IllegalStateException("Base nodes have to be identical but were not: " + closestEdge + " vs " + res.getClosestEdge()); GHPoint3D currSnapped = res.getSnappedPoint(); // no new virtual nodes if very close ("snap" together) if (Snap.considerEqual(prevPoint.lat, prevPoint.lon, currSnapped.lat, currSnapped.lon)) { res.setClosestNode(prevNodeId); res.setSnappedPoint(prevPoint); res.setWayIndex(i == 0 ? 0 : results.get(i - 1).getWayIndex()); res.setSnappedPosition(i == 0 ? Snap.Position.TOWER : results.get(i - 1).getSnappedPosition()); res.setQueryDistance(DIST_PLANE.calcDist(prevPoint.lat, prevPoint.lon, res.getQueryPoint().lat, res.getQueryPoint().lon)); continue; } queryOverlay.getClosestEdges().add(res.getClosestEdge().getEdge()); boolean isPillar = res.getSnappedPosition() == Snap.Position.PILLAR; createEdges(origEdgeKey, origRevEdgeKey, prevPoint, prevWayIndex, isPillar, res.getSnappedPoint(), res.getWayIndex(), fullPL, closestEdge, prevNodeId, virtNodeId); queryOverlay.getVirtualNodes().add(currSnapped.lat, currSnapped.lon, currSnapped.ele); // add edges again to set adjacent edges for newVirtNodeId if (addedEdges) { queryOverlay.addVirtualEdge(queryOverlay.getVirtualEdge(queryOverlay.getNumVirtualEdges() - 2)); queryOverlay.addVirtualEdge(queryOverlay.getVirtualEdge(queryOverlay.getNumVirtualEdges() - 2)); } addedEdges = true; res.setClosestNode(virtNodeId); prevNodeId = virtNodeId; prevWayIndex = res.getWayIndex() + 1; prevPoint = currSnapped; virtNodeId++; } // two edges between last result and adjacent node are still missing if not all points skipped if (addedEdges) createEdges(origEdgeKey, origRevEdgeKey, prevPoint, prevWayIndex, false, fullPL.get(fullPL.size() - 1), fullPL.size() - 2, fullPL, closestEdge, virtNodeId - 1, adjNode); return true; } }); }
3.68
druid_MySqlStatementParser_parseRepeat
/** * Parse a REPEAT statement with a label. * * @param label the statement label */ public MySqlRepeatStatement parseRepeat(String label) { MySqlRepeatStatement repeatStmt = new MySqlRepeatStatement(); repeatStmt.setLabelName(label); accept(Token.REPEAT); this.parseStatementList(repeatStmt.getStatements(), -1, repeatStmt); accept(Token.UNTIL); repeatStmt.setCondition(exprParser.expr()); accept(Token.END); accept(Token.REPEAT); acceptIdentifier(label); accept(Token.SEMI); return repeatStmt; }
3.68
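From the accept() calls, the statement shape consumed here is REPEAT ... UNTIL <condition> END REPEAT <label>; with the opening label already consumed by the caller. An illustrative statement, held as a Java string:

// the caller consumes "lbl:" and then hands control to parseRepeat("lbl")
String sql = "lbl: REPEAT SET i = i + 1; UNTIL i >= 10 END REPEAT lbl;";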
framework_ApplicationConfiguration_useServiceUrlPathParam
/** * Checks whether path info in requests to the server-side service should be * in a request parameter (named <code>v-resourcePath</code>) or appended to * the end of the service URL. * * @see #getServiceUrl() * * @return <code>true</code> if path info should be a request parameter; * <code>false</code> if the path info goes after the service URL */ public boolean useServiceUrlPathParam() { return getServiceUrlParameterName() != null; }
3.68
flink_JobMaster_declineCheckpoint
// TODO: This method needs a leader session ID @Override public void declineCheckpoint(DeclineCheckpoint decline) { schedulerNG.declineCheckpoint(decline); }
3.68
hadoop_DomainRowKey_parseRowKeyFromString
/** * Given the encoded row key as string, returns the row key as an object. * @param encodedRowKey String representation of row key. * @return A <cite>DomainRowKey</cite> object. */ public static DomainRowKey parseRowKeyFromString(String encodedRowKey) { return new DomainRowKeyConverter().decodeFromString(encodedRowKey); }
3.68
framework_VScrollTable_updateMaxIndent
/** * For internal use only. May be removed or replaced in the future. */ public void updateMaxIndent() { int oldIndent = scrollBody.getMaxIndent(); scrollBody.calculateMaxIndent(); if (oldIndent != scrollBody.getMaxIndent()) { // indent updated, headers might need adjusting triggerLazyColumnAdjustment(true); } }
3.68
morf_InlineTableUpgrader_visitPortableSqlStatement
/** * Write the sql statement. * * @param sql The {@link PortableSqlStatement} */ private void visitPortableSqlStatement(PortableSqlStatement sql) { sql.inplaceUpdateTransitionalTableNames(tracker); writeStatement(sql.getStatement(sqlDialect.getDatabaseType().identifier(), sqlDialect.schemaNamePrefix())); }
3.68
flink_JobGraph_getVerticesAsArray
/** * Returns an array of all job vertices that are registered with the job graph. The order in * which the vertices appear in the list is not defined. * * @return an array of all job vertices that are registered with the job graph */ public JobVertex[] getVerticesAsArray() { return this.taskVertices.values().toArray(new JobVertex[this.taskVertices.size()]); }
3.68
framework_CheckBoxGroup_setHtmlContentAllowed
/** * Sets whether html is allowed in the item captions. If set to true, the * captions are passed to the browser as html and the developer is * responsible for ensuring no harmful html is used. If set to false, the * content is passed to the browser as plain text. * * @param htmlContentAllowed * true if the captions are used as html, false if used as plain * text */ public void setHtmlContentAllowed(boolean htmlContentAllowed) { getState().htmlContentAllowed = htmlContentAllowed; }
3.68
flink_CollectionUtil_iterableToList
/** * Collects the elements in the Iterable in a List. If the iterable argument is null, this * method returns an empty list. */ public static <E> List<E> iterableToList(@Nullable Iterable<E> iterable) { if (iterable == null) { return Collections.emptyList(); } final ArrayList<E> list = new ArrayList<>(); iterable.iterator().forEachRemaining(list::add); return list; }
3.68
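Usage is straightforward; the notable detail is the null-tolerance, which lets callers pass a possibly-absent iterable through without a guard:

Iterable<String> maybeNames = java.util.Arrays.asList("a", "b", "c");
java.util.List<String> names = CollectionUtil.iterableToList(maybeNames); // ["a", "b", "c"]
java.util.List<String> none = CollectionUtil.iterableToList(null);        // []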
framework_AbstractTransactionalQuery_ensureTransaction
/** * Check that a transaction is active. * * @throws SQLException * if no active transaction */ protected void ensureTransaction() throws SQLException { if (!isInTransaction()) { throw new SQLException("No active transaction!"); } }
3.68
hadoop_SinglePendingCommit_getText
/** * Arbitrary notes. * @return any notes */ public String getText() { return text; }
3.68
hadoop_WasbTokenRenewer_handleKind
/** * Checks if this particular object handles the kind of token passed. * @param kind the kind of the token * @return true if it handles the passed token kind, false otherwise. */ @Override public boolean handleKind(Text kind) { return WasbDelegationTokenIdentifier.TOKEN_KIND.equals(kind); }
3.68
graphhopper_NodeBasedWitnessPathSearcher_getMemoryUsageAsString
/** * @return currently used memory in MB (approximately) */ public String getMemoryUsageAsString() { return (8L * weights.length + changedNodes.buffer.length * 4L + heap.getMemoryUsage() ) / Helper.MB + "MB"; }
3.68
hadoop_DecodingValidator_validate
/** * Validate outputs decoded from inputs, by decoding an input back from * those outputs and comparing it with the original one. * @param inputs input buffers used for decoding * @param erasedIndexes indexes of erased units used for decoding * @param outputs decoded output buffers * @throws IOException raised on errors performing I/O. */ public void validate(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs) throws IOException { ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs); ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs); validate(newInputs, erasedIndexes, newOutputs); }
3.68
hbase_ReplicationUtils_isReplicationForBulkLoadDataEnabled
/** * @param c Configuration to look at * @return True if replication for bulk load data is enabled. */ public static boolean isReplicationForBulkLoadDataEnabled(final Configuration c) { return c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); }
3.68
framework_FilesystemContainer_removeItemProperty
/** * Filesystem container does not support removing properties. * * @see Item#removeItemProperty(Object) */ @Override public boolean removeItemProperty(Object id) throws UnsupportedOperationException { throw new UnsupportedOperationException( "Filesystem container does not support property removal"); }
3.68
pulsar_ResourceGroupService_getRgLocalUsageByteCount
// Visibility for testing. protected static double getRgLocalUsageByteCount (String rgName, String monClassName) { return rgLocalUsageBytes.labels(rgName, monClassName).get(); }
3.68
dubbo_AbstractJSONImpl_checkObjectList
/** * Casts a list of unchecked JSON values to a list of checked objects in Java type. * If the given list contains a value that is not a Map, throws an exception. */ @SuppressWarnings("unchecked") @Override public List<Map<String, ?>> checkObjectList(List<?> rawList) { assert rawList != null; for (int i = 0; i < rawList.size(); i++) { if (!(rawList.get(i) instanceof Map)) { throw new ClassCastException( String.format("value %s for idx %d in %s is not object", rawList.get(i), i, rawList)); } } return (List<Map<String, ?>>) rawList; }
3.68
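A short usage sketch: once parsing yields an untyped List<?>, this check lets downstream code treat each element as a Map without scattering unchecked casts (json below stands for an instance of the class above):

java.util.List<?> raw = java.util.Arrays.asList(
        java.util.Collections.singletonMap("service", "demo"),
        java.util.Collections.singletonMap("timeout", "3000"));
java.util.List<java.util.Map<String, ?>> checked = json.checkObjectList(raw); // ok

java.util.List<?> bad = java.util.Arrays.asList("not-an-object");
// json.checkObjectList(bad) -> ClassCastException: value not-an-object for idx 0 ...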
querydsl_JTSGeometryExpressions_polygonOperation
/** * Create a new Polygon operation expression * * @param op operator * @param args arguments * @return operation expression */ public static JTSPolygonExpression<Polygon> polygonOperation(Operator op, Expression<?>... args) { return new JTSPolygonOperation<Polygon>(Polygon.class, op, args); }
3.68
hbase_RecoverLeaseFSUtils_isFileClosed
/** * Call HDFS-4525 isFileClosed if it is available. * @return True if file is closed. */ private static boolean isFileClosed(final DistributedFileSystem dfs, final Method m, final Path p) { try { return (Boolean) m.invoke(dfs, p); } catch (SecurityException e) { LOG.warn("No access", e); } catch (Exception e) { LOG.warn("Failed invocation for " + p.toString(), e); } return false; }
3.68
hbase_RegionLocations_numNonNullElements
/** * Returns the size of not-null locations * @return the size of not-null locations */ public int numNonNullElements() { return numNonNullElements; }
3.68
hbase_TableListModel_setTables
/** * @param tables the tables to set */ public void setTables(List<TableModel> tables) { this.tables = tables; }
3.68
hudi_ExternalFilePathUtil_isExternallyCreatedFile
/** * Checks if the file name was created by an external system by checking for the external file marker at the end of the file name. * @param fileName The file name * @return True if the file was created by an external system, false otherwise */ public static boolean isExternallyCreatedFile(String fileName) { return fileName.endsWith(EXTERNAL_FILE_SUFFIX); }
3.68
dubbo_Bytes_zip
/** * zip. * * @param bytes source. * @return compressed byte array. * @throws IOException */ public static byte[] zip(byte[] bytes) throws IOException { UnsafeByteArrayOutputStream bos = new UnsafeByteArrayOutputStream(); OutputStream os = new DeflaterOutputStream(bos); try { os.write(bytes); } finally { os.close(); bos.close(); } return bos.toByteArray(); }
3.68
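The matching inflate step is not shown above; a JDK-only round trip with java.util.zip, using try-with-resources instead of the manual finally block (the unzip method is a sketch, not the Dubbo implementation):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

static byte[] zip(byte[] bytes) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (DeflaterOutputStream os = new DeflaterOutputStream(bos)) {
        os.write(bytes);              // compressed and finished on close
    }
    return bos.toByteArray();
}

static byte[] unzip(byte[] bytes) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (InflaterInputStream is = new InflaterInputStream(new ByteArrayInputStream(bytes))) {
        byte[] buf = new byte[4096];
        int n;
        while ((n = is.read(buf)) != -1) {
            bos.write(buf, 0, n);     // accumulate decompressed bytes
        }
    }
    return bos.toByteArray();
}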
hbase_FutureUtils_consume
/** * Log the error if the future indicates any failure. */ public static void consume(CompletableFuture<?> future) { addListener(future, (r, e) -> { if (e != null) { LOG.warn("Async operation fails", e); } }); }
3.68
hadoop_DynamicIOStatisticsBuilder_withAtomicIntegerMaximum
/** * Add a maximum statistic to dynamically return the * latest value of the source. * @param key key of this statistic * @param source atomic int maximum * @return the builder. */ public DynamicIOStatisticsBuilder withAtomicIntegerMaximum(String key, AtomicInteger source) { withLongFunctionMaximum(key, s -> source.get()); return this; }
3.68
hbase_Client_getExtraHeader
/** * Get an extra header value. */ public String getExtraHeader(final String name) { return extraHeaders.get(name); }
3.68
morf_FieldReference_direction
/** * Sets the direction to sort the field on. * * @param direction the direction to set * @return this */ public Builder direction(Direction direction) { this.direction = direction; return this; }
3.68
hadoop_SimpleTcpServer_getBoundPort
// boundPort will be set only after server starts public int getBoundPort() { return this.boundPort; }
3.68
hbase_RestoreSnapshotProcedure_addRegionsToInMemoryStates
/** * Add regions to in-memory states * @param regionInfos regions to add * @param env MasterProcedureEnv * @param regionReplication the number of region replications */ private void addRegionsToInMemoryStates(List<RegionInfo> regionInfos, MasterProcedureEnv env, int regionReplication) { AssignmentManager am = env.getAssignmentManager(); for (RegionInfo regionInfo : regionInfos) { if (regionInfo.isSplit()) { am.getRegionStates().updateRegionState(regionInfo, RegionState.State.SPLIT); } else { am.getRegionStates().updateRegionState(regionInfo, RegionState.State.CLOSED); // For region replicas for (int i = 1; i < regionReplication; i++) { RegionInfo regionInfoForReplica = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, i); am.getRegionStates().updateRegionState(regionInfoForReplica, RegionState.State.CLOSED); } } } }
3.68
framework_VScrollTable_updateBody
/** * For internal use only. May be removed or replaced in the future. * * @param uidl * which contains row data * @param firstRow * first row in data set * @param reqRows * amount of rows in data set */ public void updateBody(UIDL uidl, int firstRow, int reqRows) { int oldIndent = scrollBody.getMaxIndent(); if (uidl == null || reqRows < 1) { // container is empty, remove possibly existing rows if (firstRow <= 0) { postponeSanityCheckForLastRendered = true; while (scrollBody.getLastRendered() > scrollBody .getFirstRendered()) { scrollBody.unlinkRow(false); } postponeSanityCheckForLastRendered = false; scrollBody.unlinkRow(false); } return; } scrollBody.renderRows(uidl, firstRow, reqRows); discardRowsOutsideCacheWindow(); scrollBody.calculateMaxIndent(); if (oldIndent != scrollBody.getMaxIndent()) { // indent updated, headers might need adjusting headerChangedDuringUpdate = true; } }
3.68
flink_LegacySourceTransformation_getOperatorFactory
/** Returns the {@code StreamOperatorFactory} of this {@code LegacySourceTransformation}. */ public StreamOperatorFactory<T> getOperatorFactory() { return operatorFactory; }
3.68