Dataset columns: name (string, lengths 12–178), code_snippet (string, lengths 8–36.5k), score (float64, range 3.26–3.68)
flink_ExecNodeContext_withId
/** * Set the unique ID of the node, so that the {@link ExecNodeContext}, together with the type * related {@link #name} and {@link #version}, stores all the necessary info to uniquely * reconstruct the {@link ExecNode}, and avoid storing the {@link #id} independently as a field * in {@link ExecNodeBase}. */ public ExecNodeContext withId(int id) { return new ExecNodeContext(id, this.name, this.version); }
3.68
Activiti_ReflectUtil_getField
/** * Returns the field of the given class or null if it doesn't exist. */ public static Field getField(String fieldName, Class<?> clazz) { Field field = null; try { field = clazz.getDeclaredField(fieldName); } catch (SecurityException e) { throw new ActivitiException("not allowed to access field " + fieldName + " on class " + clazz.getCanonicalName()); } catch (NoSuchFieldException e) { // getDeclaredField doesn't search superclasses (getFields() does, but it only returns public fields) Class<?> superClass = clazz.getSuperclass(); if (superClass != null) { return getField(fieldName, superClass); } } return field; }
3.68
morf_ColumnBean_isAutoNumbered
/** * @see org.alfasoftware.morf.metadata.Column#isAutoNumbered() */ @Override public boolean isAutoNumbered() { return autoNumber; }
3.68
hbase_ScheduledChore_getTimeBetweenRuns
/** * Returns how long in millis it has been since this chore last ran. Useful for checking if the * chore has missed its scheduled start time by too large a margin. */ synchronized long getTimeBetweenRuns() { return timeOfThisRun - timeOfLastRun; }
3.68
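As an illustration of the method above, a caller could compare the reported gap against the chore's expected period; chore and expectedPeriodMillis are hypothetical names, not part of the snippet.

    // Hypothetical monitoring check (sketch, not HBase API):
    long gapMillis = chore.getTimeBetweenRuns();
    long expectedPeriodMillis = 1_000L; // assumed period of the chore
    if (gapMillis > expectedPeriodMillis * 3 / 2) {
        // the chore missed its scheduled start by a large margin
        System.out.println("Chore is late by " + (gapMillis - expectedPeriodMillis) + " ms");
    }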
hudi_OptionsResolver_getConflictResolutionStrategy
/** * Returns the conflict resolution strategy. */ public static ConflictResolutionStrategy getConflictResolutionStrategy(Configuration conf) { return isBucketIndexType(conf) ? new BucketIndexConcurrentFileWritesConflictResolutionStrategy() : new SimpleConcurrentFileWritesConflictResolutionStrategy(); }
3.68
pulsar_SinkContext_pause
/** * Stop requesting new messages for given topic and partition until {@link #resume(String topic, int partition)} * is called. * * @param topic - topic name * @param partition - partition id (0 for non-partitioned topics) */ default void pause(String topic, int partition) throws PulsarClientException { throw new UnsupportedOperationException("not implemented"); }
3.68
hadoop_RegistryTypeUtils_map
/** * Create a single entry map * @param key map entry key * @param val map entry value * @return a 1 entry map. */ public static Map<String, String> map(String key, String val) { Map<String, String> map = new HashMap<String, String>(1); map.put(key, val); return map; }
3.68
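A minimal usage sketch of the one-entry map helper above:

    Map<String, String> entry = RegistryTypeUtils.map("scheme", "https");
    System.out.println(entry.size());        // 1
    System.out.println(entry.get("scheme")); // https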
framework_InputPromptGetText_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { final TextField tf = new TextField(); tf.setId(FIELD); tf.setInputPrompt("input text here"); tf.setImmediate(true); tf.setNullRepresentation(""); Button button = new Button("Click Me"); button.setId(BUTTON); button.addClickListener(event -> { String input = tf.getValue(); Label label = new Label("Your input was: " + input); label.setId(LABEL2); getLayout().addComponent(label); }); tf.addShortcutListener( new ShortcutListener("Shortcut", KeyCode.ENTER, null) { @Override public void handleAction(Object sender, Object target) { String input = tf.getValue(); Label label = new Label("Your input was: " + input); label.setId(LABEL1); getLayout().addComponent(label); } }); getLayout().addComponent(tf); getLayout().addComponent(button); }
3.68
rocketmq-connect_ByteArrayConverter_configure
/** * Configure this class. * * @param configs configs in key/value pairs */ @Override public void configure(Map configs) { // no configuration is required for byte array conversion }
3.68
framework_VComboBox_reset
/** * Resets the ComboBox to its initial state. */ private void reset() { debug("VComboBox: reset()"); // just fetch selected information from state String text = connector.getState().selectedItemCaption; setText(text == null ? getEmptySelectionCaption() : text); setSelectedItemIcon(connector.getState().selectedItemIcon); selectedOptionKey = (connector.getState().selectedItemKey); if (selectedOptionKey == null || selectedOptionKey.isEmpty()) { currentSuggestion = null; // #13217 selectedOptionKey = null; updatePlaceholder(); } else { currentSuggestion = currentSuggestions.stream() .filter(suggestion -> suggestion.getOptionKey() .equals(selectedOptionKey)) .findAny().orElse(null); } suggestionPopup.hide(); }
3.68
hadoop_ManifestCommitterSupport_getPendingJobAttemptsPath
/** * Get the location of pending job attempts. * @param out the base output directory. * @return the location of pending job attempts. */ public static Path getPendingJobAttemptsPath(Path out) { return new Path(out, PENDING_DIR_NAME); }
3.68
morf_WindowFunction_deepCopyInternal
/** * @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation) */ @Override protected AliasedField deepCopyInternal(DeepCopyTransformation transformer) { return new WindowFunction( getAlias(), (Function) transformer.deepCopy(function), transformIterable(orderBys, transformer), transformIterable(partitionBys, transformer)); }
3.68
dubbo_ClassSourceScanner_configClasses
/** * Returns the required configuration classes: subclasses of AbstractConfig, * excluding abstract classes. * @return the configuration classes */ public List<Class<?>> configClasses() { return getClasses().values().stream() .filter(c -> AbstractConfig.class.isAssignableFrom(c) && !Modifier.isAbstract(c.getModifiers())) .collect(Collectors.toList()); }
3.68
hbase_TableRecordReaderImpl_setHTable
/** * @param htable the table to scan. */ public void setHTable(Table htable) { Configuration conf = htable.getConfiguration(); logScannerActivity = conf.getBoolean(ConnectionConfiguration.LOG_SCANNER_ACTIVITY, false); logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100); this.htable = htable; }
3.68
framework_Upload_removeFailedListener
/** * Removes the upload interrupted event listener. * * @param listener * the Listener to be removed. */ @Deprecated public void removeFailedListener(FailedListener listener) { removeListener(FailedEvent.class, listener, UPLOAD_FAILED_METHOD); }
3.68
hadoop_CommitTaskStage_executeStage
/** * Scan the task attempt dir then save the manifest. * A snapshot of the IOStats will be included in the manifest; * this includes the scan time. * @param arguments arguments to the function. * @return the path the manifest was saved to, and the manifest. * @throws IOException IO failure. */ @Override protected CommitTaskStage.Result executeStage(final Void arguments) throws IOException { LOG.info("{}: Committing task \"{}\"", getName(), getTaskAttemptId()); // execute the scan final TaskAttemptScanDirectoryStage scanStage = new TaskAttemptScanDirectoryStage(getStageConfig()); TaskManifest manifest = scanStage.apply(arguments); // add the scan as task commit. It's not quite, as it doesn't include // the saving, but ... scanStage.addExecutionDurationToStatistics(getIOStatistics(), OP_STAGE_TASK_COMMIT); // save a snapshot of the IO Statistics final IOStatisticsSnapshot manifestStats = snapshotIOStatistics(); manifestStats.aggregate(getIOStatistics()); manifest.setIOStatistics(manifestStats); // Now save with rename Path manifestPath = new SaveTaskManifestStage(getStageConfig()) .apply(manifest); return new CommitTaskStage.Result(manifestPath, manifest); }
3.68
hbase_MasterRpcServices_getSchemaAlterStatus
/** * Get the number of regions of the table that have been updated by the alter. * @return a Pair indicating the progress of the alter: Pair.getFirst is the number of regions yet * to be updated, Pair.getSecond is the total number of regions of the table */ @Override public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException { // TODO: currently, we query using the table name on the client side. this // may overlap with other table operations or the table operation may // have completed before querying this API. We need to refactor to a // transaction system in the future to avoid these ambiguities. TableName tableName = ProtobufUtil.toTableName(req.getTableName()); try { server.checkInitialized(); Pair<Integer, Integer> pair = server.getAssignmentManager().getReopenStatus(tableName); GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder(); ret.setYetToUpdateRegions(pair.getFirst()); ret.setTotalRegions(pair.getSecond()); return ret.build(); } catch (IOException ioe) { throw new ServiceException(ioe); } }
3.68
framework_AbstractTestUI_runAfterResponse
/** * Execute the provided runnable on the UI thread as soon as the current * request has been sent. */ protected void runAfterResponse(final Runnable runnable) { // Immediately start a thread that will start waiting for the session to // get unlocked. new Thread() { @Override public void run() { accessSynchronously(runnable); } }.start(); }
3.68
flink_GenericDataSourceBase_getSplitDataProperties
/** * Returns the data properties of this data source's splits. * * @return The data properties of this data source's splits or null if no properties have been * set. */ public SplitDataProperties<OUT> getSplitDataProperties() { return this.splitProperties; }
3.68
hadoop_ComponentContainers_getContainers
/** * Returns the containers of the component. */ @ApiModelProperty(example = "null", value = "Containers of the component.") public List<Container> getContainers() { return containers; }
3.68
flink_HashPartition_insertIntoProbeBuffer
/** * Inserts the given record into the probe side buffers. This method is only applicable when the * partition was spilled while processing the build side. * * <p>If this method is invoked when the partition is still being built, it has undefined * behavior. * * @param record The record to be inserted into the probe side buffers. * @throws IOException Thrown, if the buffer is full, needs to be spilled, and spilling causes * an error. */ public final void insertIntoProbeBuffer(PT record) throws IOException { this.probeSideSerializer.serialize(record, this.probeSideBuffer); this.probeSideRecordCounter++; }
3.68
hbase_RequestConverter_buildModifyNamespaceRequest
/** * Creates a protocol buffer ModifyNamespaceRequest * @return a ModifyNamespaceRequest */ public static ModifyNamespaceRequest buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) { ModifyNamespaceRequest.Builder builder = ModifyNamespaceRequest.newBuilder(); builder.setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)); return builder.build(); }
3.68
framework_UIDL_getId
/** * Shorthand for getting the attribute named "id", which for Paintables is * the essential paintableId which binds the server side component to the * client side widget. * * @return the value of the id attribute, if available */ public String getId() { return getStringAttribute("id"); }
3.68
flink_WritableSavepoint_withConfiguration
/** * Sets a configuration that will be applied to the stream operators used to bootstrap a new * savepoint. * * @param option the configuration option to set * @param value the value to store * @param <T> type of the value to be stored * @return The modified savepoint. */ @SuppressWarnings("unchecked") public <T> F withConfiguration(ConfigOption<T> option, T value) { configuration.set(option, value); return (F) this; }
3.68
hudi_AbstractTableFileSystemView_convertFileStatusesToBaseFiles
/** * Helper to convert file-status to base-files. * * @param statuses List of File-Status */ private Stream<HoodieBaseFile> convertFileStatusesToBaseFiles(FileStatus[] statuses) { Predicate<FileStatus> roFilePredicate = fileStatus -> { String pathName = fileStatus.getPath().getName(); // Keep a file as a base file only if: // 1. its extension matches the table's configured base file extension // 2. it is not a .hoodie_partition_metadata file return pathName.contains(metaClient.getTableConfig().getBaseFileFormat().getFileExtension()) && !pathName.startsWith(HoodiePartitionMetadata.HOODIE_PARTITION_METAFILE_PREFIX); }; return Arrays.stream(statuses).filter(roFilePredicate).map(HoodieBaseFile::new); }
3.68
morf_XmlDataSetProducer_indexes
/** * @see org.alfasoftware.morf.metadata.Table#indexes() */ @Override public List<Index> indexes() { return indexes; }
3.68
framework_VTabsheet_registerTab
/** * Register the tab to the selection handler. * * @param tab * the tab to register. */ public void registerTab(Tab tab) { tab.addBlurHandler(this); tab.addFocusHandler(this); tab.addKeyDownHandler(this); tab.addClickHandler(this); tab.addMouseDownHandler(this); }
3.68
hudi_HoodieInMemoryHashIndex_isGlobal
/** * Only looks up by recordKey. */ @Override public boolean isGlobal() { return true; }
3.68
framework_GridRowDragger_getGridDragSource
/** * Returns the drag source grid, exposing it for customizations. * * @return the drag source grid */ public GridDragSource<T> getGridDragSource() { return gridDragSource; }
3.68
flink_Task_preProcessException
/** Unwrap, enrich and handle fatal errors. */ private Throwable preProcessException(Throwable t) { // unwrap wrapped exceptions to make stack traces more compact if (t instanceof WrappingRuntimeException) { t = ((WrappingRuntimeException) t).unwrap(); } TaskManagerExceptionUtils.tryEnrichTaskManagerError(t); // check if the exception is unrecoverable if (ExceptionUtils.isJvmFatalError(t) || (t instanceof OutOfMemoryError && taskManagerConfig.shouldExitJvmOnOutOfMemoryError())) { // terminate the JVM immediately // don't attempt a clean shutdown, because we cannot expect the clean shutdown // to complete try { LOG.error( "Encountered fatal error {} - terminating the JVM", t.getClass().getName(), t); } finally { Runtime.getRuntime().halt(-1); } } return t; }
3.68
hudi_ClusteringOperator_transform
/** * Transform IndexedRecord into HoodieRecord. */ private RowData transform(IndexedRecord indexedRecord) { GenericRecord record = (GenericRecord) indexedRecord; return (RowData) avroToRowDataConverter.convert(record); }
3.68
flink_RemoteInputChannel_increaseBackoff
/** * The remote task manager creates a partition request listener and returns {@link * PartitionNotFoundException} until the listener times out, so the backoff should add the * timeout in milliseconds if one is configured. * * @return <code>true</code>, iff the operation was successful. Otherwise, <code>false</code>. */ @Override protected boolean increaseBackoff() { if (partitionRequestListenerTimeout > 0) { currentBackoff += partitionRequestListenerTimeout; return currentBackoff < 2 * maxBackoff; } // Backoff is disabled return false; }
3.68
framework_CalendarTest_switchToWeekView
/* * Switch the view to week view. */ public void switchToWeekView() { viewMode = Mode.WEEK; // weekButton.setVisible(false); // monthButton.setVisible(true); }
3.68
pulsar_FunctionMetaDataManager_updateFunctionOnLeader
/** * Called by the worker when we are in the leader mode. In this state, we update our in-memory * data structures and then write to the metadata topic. * @param functionMetaData The function metadata in question * @param delete Is this a delete operation * @throws IllegalStateException if we are not the leader * @throws IllegalArgumentException if the request is out of date. */ public synchronized void updateFunctionOnLeader(FunctionMetaData functionMetaData, boolean delete) throws IllegalStateException, IllegalArgumentException { boolean needsScheduling; if (exclusiveLeaderProducer == null) { throw new IllegalStateException("Not the leader"); } // Check first to avoid local cache update failure checkRequestOutDated(functionMetaData, delete); byte[] toWrite; if (workerConfig.getUseCompactedMetadataTopic()) { if (delete) { toWrite = "".getBytes(); } else { toWrite = functionMetaData.toByteArray(); } } else { Request.ServiceRequest serviceRequest = Request.ServiceRequest.newBuilder() .setServiceRequestType(delete ? Request.ServiceRequest.ServiceRequestType.DELETE : Request.ServiceRequest.ServiceRequestType.UPDATE) .setFunctionMetaData(functionMetaData) .setWorkerId(workerConfig.getWorkerId()) .setRequestId(UUID.randomUUID().toString()) .build(); toWrite = serviceRequest.toByteArray(); } try { TypedMessageBuilder builder = exclusiveLeaderProducer.newMessage() .value(toWrite) .property(versionTag, Long.toString(functionMetaData.getVersion())); if (workerConfig.getUseCompactedMetadataTopic()) { builder = builder.key(FunctionCommon.getFullyQualifiedName(functionMetaData.getFunctionDetails())); } lastMessageSeen = builder.send(); if (delete) { needsScheduling = processDeregister(functionMetaData); } else { needsScheduling = processUpdate(functionMetaData); } } catch (Exception e) { log.error("Could not write into Function Metadata topic", e); throw new IllegalStateException("Internal Error updating function at the leader", e); } if (needsScheduling) { this.schedulerManager.schedule(); } }
3.68
hmily_CommonAssembler_assembleHmilySimpleTableSegment
/** * Assemble hmily simple table segment. * * @param simpleTableSegment simple table segment * @return hmily simple table segment */ public static HmilySimpleTableSegment assembleHmilySimpleTableSegment(final SimpleTableSegment simpleTableSegment) { TableNameSegment tableNameSegment = simpleTableSegment.getTableName(); HmilyIdentifierValue hmilyIdentifierValue = new HmilyIdentifierValue(tableNameSegment.getIdentifier().getValue()); HmilyTableNameSegment hmilyTableNameSegment = new HmilyTableNameSegment(tableNameSegment.getStartIndex(), tableNameSegment.getStopIndex(), hmilyIdentifierValue); HmilyOwnerSegment hmilyOwnerSegment = null; OwnerSegment ownerSegment; if (simpleTableSegment.getOwner().isPresent()) { ownerSegment = simpleTableSegment.getOwner().get(); hmilyOwnerSegment = new HmilyOwnerSegment(ownerSegment.getStartIndex(), ownerSegment.getStopIndex(), new HmilyIdentifierValue(ownerSegment.getIdentifier().getValue())); } HmilyAliasSegment hmilyAliasSegment = null; String aliasSegmentString; if (simpleTableSegment.getAlias().isPresent()) { aliasSegmentString = simpleTableSegment.getAlias().get(); hmilyAliasSegment = new HmilyAliasSegment(0, 0, new HmilyIdentifierValue(aliasSegmentString)); } HmilySimpleTableSegment hmilySimpleTableSegment = new HmilySimpleTableSegment(hmilyTableNameSegment); hmilySimpleTableSegment.setOwner(hmilyOwnerSegment); hmilySimpleTableSegment.setAlias(hmilyAliasSegment); return hmilySimpleTableSegment; }
3.68
streampipes_InfluxStore_connect
/** * Connects to the InfluxDB Server, sets the database and initializes the batch-behaviour * * @throws SpRuntimeException If no connection can be established or if the database cannot * be found */ private void connect(InfluxConnectionSettings settings) throws SpRuntimeException { influxDb = InfluxClientProvider.getInfluxDBClient(settings); // Checking, if server is available var response = influxDb.ping(); if (response.getVersion().equalsIgnoreCase("unknown")) { throw new SpRuntimeException("Could not connect to InfluxDb Server: " + settings.getConnectionUrl()); } String databaseName = settings.getDatabaseName(); // Checking whether the database exists if (!InfluxRequests.databaseExists(influxDb, databaseName)) { LOG.info("Database '" + databaseName + "' not found. Creating it ..."); createDatabase(databaseName); } // setting up the database influxDb.setDatabase(databaseName); var batchSize = 2000; var flushDuration = 500; influxDb.enableBatch(batchSize, flushDuration, TimeUnit.MILLISECONDS); }
3.68
flink_ExecNodeUtil_makeLegacySourceTransformationsBounded
/** * The planner might have more information than expressed in legacy source transformations. This * enforces planner information about boundedness to the affected transformations. */ public static void makeLegacySourceTransformationsBounded(Transformation<?> transformation) { if (transformation instanceof LegacySourceTransformation) { ((LegacySourceTransformation<?>) transformation).setBoundedness(Boundedness.BOUNDED); } transformation.getInputs().forEach(ExecNodeUtil::makeLegacySourceTransformationsBounded); }
3.68
hbase_Client_addExtraHeader
/** * Add extra headers. These extra headers will be applied to all http methods until they are * removed. If any header is not used any more, the client needs to remove it explicitly. */ public void addExtraHeader(final String name, final String value) { extraHeaders.put(name, value); }
3.68
flink_DataStreamSink_uid
/** * Sets an ID for this operator. * * <p>The specified ID is used to assign the same operator ID across job submissions (for * example when starting a job from a savepoint). * * <p><strong>Important</strong>: this ID needs to be unique per transformation and job. * Otherwise, job submission will fail. * * @param uid The unique user-specified ID of this transformation. * @return The operator with the specified ID. */ @PublicEvolving public DataStreamSink<T> uid(String uid) { transformation.setUid(uid); return this; }
3.68
flink_ProjectOperator_projectTuple23
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public < T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> ProjectOperator< T, Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> projectTuple23() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo< Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> tType = new TupleTypeInfo< Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(fTypes); return new ProjectOperator< T, Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(this.ds, this.fieldIndexes, tType); }
3.68
framework_ContainerEventProvider_setDescriptionProperty
/** * Set the property which provides the description of the event. */ public void setDescriptionProperty(Object descriptionProperty) { this.descriptionProperty = descriptionProperty; }
3.68
flink_Tuple18_toString
/** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15, f16, f17), where the individual fields are the value * returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return "(" + StringUtils.arrayAwareToString(this.f0) + "," + StringUtils.arrayAwareToString(this.f1) + "," + StringUtils.arrayAwareToString(this.f2) + "," + StringUtils.arrayAwareToString(this.f3) + "," + StringUtils.arrayAwareToString(this.f4) + "," + StringUtils.arrayAwareToString(this.f5) + "," + StringUtils.arrayAwareToString(this.f6) + "," + StringUtils.arrayAwareToString(this.f7) + "," + StringUtils.arrayAwareToString(this.f8) + "," + StringUtils.arrayAwareToString(this.f9) + "," + StringUtils.arrayAwareToString(this.f10) + "," + StringUtils.arrayAwareToString(this.f11) + "," + StringUtils.arrayAwareToString(this.f12) + "," + StringUtils.arrayAwareToString(this.f13) + "," + StringUtils.arrayAwareToString(this.f14) + "," + StringUtils.arrayAwareToString(this.f15) + "," + StringUtils.arrayAwareToString(this.f16) + "," + StringUtils.arrayAwareToString(this.f17) + ")"; }
3.68
flink_Path_isAbsolute
/** * Checks if the directory of this path is absolute. * * @return <code>true</code> if the directory of this path is absolute, <code>false</code> * otherwise */ public boolean isAbsolute() { final int start = hasWindowsDrive(uri.getPath(), true) ? 3 : 0; return uri.getPath().startsWith(SEPARATOR, start); }
3.68
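A small usage sketch of the check above, assuming Flink's Path(String) constructor:

    Path absolute = new Path("/tmp/data");
    Path relative = new Path("data/file.txt");
    System.out.println(absolute.isAbsolute()); // true
    System.out.println(relative.isAbsolute()); // false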
AreaShop_SignLinkerManager_onPlayerLeave
/** * Handles players disconnecting. * @param event The PlayerQuitEvent */ @EventHandler(priority = EventPriority.MONITOR) public void onPlayerLeave(PlayerQuitEvent event) { exitSignLinkMode(event.getPlayer()); }
3.68
flink_FlinkPreparingTableBase_getExpression
/** * Generates code for this table, which is not supported now. * * @param clazz The desired collection class, for example {@link * org.apache.calcite.linq4j.Queryable} */ public Expression getExpression(Class clazz) { throw new UnsupportedOperationException(); }
3.68
hadoop_FilterFileSystem_getFileStatus
/** * Get file status. */ @Override public FileStatus getFileStatus(Path f) throws IOException { return fs.getFileStatus(f); }
3.68
graphhopper_BitUtil_toBitString
/** * Higher order bits come first in the returned string. */ public String toBitString(byte[] bytes) { StringBuilder sb = new StringBuilder(bytes.length * 8); byte lastBit = (byte) (1 << 7); for (int bIndex = bytes.length - 1; bIndex >= 0; bIndex--) { byte b = bytes[bIndex]; for (int i = 0; i < 8; i++) { if ((b & lastBit) == 0) sb.append('0'); else sb.append('1'); b <<= 1; } } return sb.toString(); }
3.68
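A worked example of the byte order: the loop starts at the last array index, so bytes[1] supplies the leading bits (obtaining the instance via BitUtil.LITTLE is an assumption about the surrounding class):

    BitUtil bitUtil = BitUtil.LITTLE; // assumed way to obtain an instance
    byte[] bytes = {0x01, (byte) 0x80};
    // bytes[1] (0x80 -> "10000000") is emitted before bytes[0] (0x01 -> "00000001")
    System.out.println(bitUtil.toBitString(bytes)); // 1000000000000001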
flink_DeclarativeSlotManager_allocateSlot
/** * Allocates the given slot. This entails sending a registration message to the task manager and * treating failures. * * @param taskManagerSlot slot to allocate * @param jobId job for which the slot should be allocated for * @param targetAddress address of the job master * @param resourceProfile resource profile for the requirement for which the slot is used */ private void allocateSlot( TaskManagerSlotInformation taskManagerSlot, JobID jobId, String targetAddress, ResourceProfile resourceProfile) { final SlotID slotId = taskManagerSlot.getSlotId(); LOG.debug( "Starting allocation of slot {} for job {} with resource profile {}.", slotId, jobId, resourceProfile); final InstanceID instanceId = taskManagerSlot.getInstanceId(); if (!taskExecutorManager.isTaskManagerRegistered(instanceId)) { throw new IllegalStateException( "Could not find a registered task manager for instance id " + instanceId + '.'); } final TaskExecutorConnection taskExecutorConnection = taskManagerSlot.getTaskManagerConnection(); final TaskExecutorGateway gateway = taskExecutorConnection.getTaskExecutorGateway(); final AllocationID allocationId = new AllocationID(); slotTracker.notifyAllocationStart(slotId, jobId); taskExecutorManager.markUsed(instanceId); pendingSlotAllocations.put(slotId, allocationId); // RPC call to the task manager CompletableFuture<Acknowledge> requestFuture = gateway.requestSlot( slotId, jobId, allocationId, resourceProfile, targetAddress, resourceManagerId, taskManagerRequestTimeout); CompletableFuture<Void> slotAllocationResponseProcessingFuture = requestFuture.handleAsync( (Acknowledge acknowledge, Throwable throwable) -> { final AllocationID currentAllocationForSlot = pendingSlotAllocations.get(slotId); if (currentAllocationForSlot == null || !currentAllocationForSlot.equals(allocationId)) { LOG.debug( "Ignoring slot allocation update from task executor {} for slot {} and job {}, because the allocation was already completed or cancelled.", instanceId, slotId, jobId); return null; } if (acknowledge != null) { LOG.trace( "Completed allocation of slot {} for job {}.", slotId, jobId); slotTracker.notifyAllocationComplete(slotId, jobId); } else { if (throwable instanceof SlotOccupiedException) { SlotOccupiedException exception = (SlotOccupiedException) throwable; LOG.debug( "Tried allocating slot {} for job {}, but it was already allocated for job {}.", slotId, jobId, exception.getJobId()); // report as a slot status to force the state transition // this could be a problem if we ever assume that the task // executor always reports about all slots slotTracker.notifySlotStatus( Collections.singleton( new SlotStatus( slotId, taskManagerSlot.getResourceProfile(), exception.getJobId(), exception.getAllocationId()))); } else { LOG.warn( "Slot allocation for slot {} for job {} failed.", slotId, jobId, throwable); slotTracker.notifyFree(slotId); } checkResourceRequirementsWithDelay(); } return null; }, mainThreadExecutor); FutureUtils.assertNoException(slotAllocationResponseProcessingFuture); }
3.68
hmily_MotanHmilyAccountApplication_main
/** * main. * * @param args args. */ public static void main(final String[] args) { SpringApplication springApplication = new SpringApplication(MotanHmilyAccountApplication.class); springApplication.setWebApplicationType(WebApplicationType.NONE); springApplication.run(args); MotanSwitcherUtil.setSwitcherValue(MotanConstants.REGISTRY_HEARTBEAT_SWITCHER, true); System.out.println("MotanHmilyAccountApplication server start..."); }
3.68
framework_ApplicationConfiguration_getAtmosphereJSVersion
/** * Return Atmosphere JS version. * * @since 7.4 * * @return Atmosphere JS version. */ public String getAtmosphereJSVersion() { return getJsoConfiguration(id).getAtmosphereJSVersion(); }
3.68
flink_TableFactoryService_normalizeSupportedProperties
/** Prepares the supported properties of a factory to be used for match operations. */ private static Tuple2<List<String>, List<String>> normalizeSupportedProperties( TableFactory factory) { List<String> supportedProperties = factory.supportedProperties(); if (supportedProperties == null) { throw new TableException( String.format( "Supported properties of factory '%s' must not be null.", factory.getClass().getName())); } List<String> supportedKeys = supportedProperties.stream().map(String::toLowerCase).collect(Collectors.toList()); // extract wildcard prefixes List<String> wildcards = extractWildcardPrefixes(supportedKeys); return Tuple2.of(supportedKeys, wildcards); }
3.68
AreaShop_Utils_toName
/** * Conversion to name by uuid object. * @param uuid The uuid in string format * @return the name of the player */ public static String toName(UUID uuid) { if(uuid == null) { return ""; } else { String name = Bukkit.getOfflinePlayer(uuid).getName(); if(name != null) { return name; } return ""; } }
3.68
flink_IOUtils_closeAllQuietly
/** Closes all elements in the iterable with closeQuietly(). */ public static void closeAllQuietly(Iterable<? extends AutoCloseable> closeables) { if (null != closeables) { for (AutoCloseable closeable : closeables) { closeQuietly(closeable); } } }
3.68
pulsar_NamespacesBase_internalClearZkSources
// clear zk-node resources for deleting namespace protected CompletableFuture<Void> internalClearZkSources() { // clear resource of `/namespace/{namespaceName}` for zk-node return namespaceResources().deleteNamespaceAsync(namespaceName) .thenCompose(ignore -> namespaceResources().getPartitionedTopicResources() .clearPartitionedTopicMetadataAsync(namespaceName)) // clear resource for manager-ledger z-node .thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources() .clearDomainPersistence(namespaceName)) .thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources() .clearNamespacePersistence(namespaceName)) // we have successfully removed all the ownership for the namespace, the policies // z-node can be deleted now .thenCompose(ignore -> namespaceResources().deletePoliciesAsync(namespaceName)) // clear z-node of local policies .thenCompose(ignore -> getLocalPolicies().deleteLocalPoliciesAsync(namespaceName)) // clear /loadbalance/bundle-data .thenCompose(ignore -> loadBalanceResources().getBundleDataResources().deleteBundleDataAsync(namespaceName)); }
3.68
dubbo_Utf8Utils_trailingByteValue
/** * Returns the actual value of the trailing byte (removes the prefix '10') for composition. */ private static int trailingByteValue(byte b) { return b & 0x3F; }
3.68
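A worked example of the masking: the UTF-8 continuation byte 0xBF is the '10' prefix plus six payload bits, all set.

    byte b = (byte) 0xBF;        // 1011_1111: '10' prefix + payload 111111
    int payload = b & 0x3F;      // the mask drops the two prefix bits
    System.out.println(payload); // 63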
morf_DrawIOGraphPrinter_hashCode
/** * Based on label only. * * @see java.lang.Object#hashCode() */ @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (label == null ? 0 : label.hashCode()); return result; }
3.68
flink_DefaultRollingPolicy_withInactivityInterval
/** * Sets the interval of allowed inactivity after which a part file will have to roll. The * frequency at which this is checked is controlled by the {@link * org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.RowFormatBuilder#withBucketCheckInterval(long)} * setting. * * @param interval the allowed inactivity interval. */ public DefaultRollingPolicy.PolicyBuilder withInactivityInterval(final Duration interval) { Preconditions.checkNotNull( interval, "Rolling policy inactivity interval cannot be null"); return new PolicyBuilder(partSize, rolloverInterval, interval.toMillis()); }
3.68
hbase_MasterRpcServices_setTableStateInMeta
/** * Update state of the table in meta only. This is required by hbck in some situations to clean up * stuck assign/unassign region procedures for the table. * @return previous state of the table */ @Override public GetTableStateResponse setTableStateInMeta(RpcController controller, SetTableStateInMetaRequest request) throws ServiceException { rpcPreCheck("setTableStateInMeta"); TableName tn = ProtobufUtil.toTableName(request.getTableName()); try { TableState prevState = this.server.getTableStateManager().getTableState(tn); TableState newState = TableState.convert(tn, request.getTableState()); LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn, prevState.getState(), newState.getState()); this.server.getTableStateManager().setTableState(tn, newState.getState()); return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build(); } catch (Exception e) { throw new ServiceException(e); } }
3.68
flink_RocksDBMemoryControllerUtils_allocateRocksDBSharedResources
/** * Allocate memory controllable RocksDB shared resources. * * @param totalMemorySize The total memory limit size. * @param writeBufferRatio The ratio of total memory which is occupied by write buffer manager. * @param highPriorityPoolRatio The high priority pool ratio of cache. * @param factory creates Write Buffer Manager and Block Cache * @return memory controllable RocksDB shared resources. */ public static RocksDBSharedResources allocateRocksDBSharedResources( long totalMemorySize, double writeBufferRatio, double highPriorityPoolRatio, boolean usingPartitionedIndexFilters, RocksDBMemoryFactory factory) { long calculatedCacheCapacity = RocksDBMemoryControllerUtils.calculateActualCacheCapacity( totalMemorySize, writeBufferRatio); final Cache cache = factory.createCache(calculatedCacheCapacity, highPriorityPoolRatio); long writeBufferManagerCapacity = RocksDBMemoryControllerUtils.calculateWriteBufferManagerCapacity( totalMemorySize, writeBufferRatio); final WriteBufferManager wbm = factory.createWriteBufferManager(writeBufferManagerCapacity, cache); LOG.debug( "Allocated RocksDB shared resources, calculatedCacheCapacity: {}, highPriorityPoolRatio: {}, writeBufferManagerCapacity: {}, usingPartitionedIndexFilters: {}", calculatedCacheCapacity, highPriorityPoolRatio, writeBufferManagerCapacity, usingPartitionedIndexFilters); return new RocksDBSharedResources( cache, wbm, writeBufferManagerCapacity, usingPartitionedIndexFilters); }
3.68
framework_MultiSelectionModelImpl_fetchAllHierarchical
/** * Fetch all items from the given hierarchical data provider. * * @since 8.1 * @param dataProvider * the data provider to fetch from * @return all items in the data provider */ private Stream<T> fetchAllHierarchical( HierarchicalDataProvider<T, ?> dataProvider) { return fetchAllDescendants(null, dataProvider); }
3.68
hadoop_BalanceProcedureScheduler_shutDownAndWait
/** * Shutdown scheduler and wait at most timeout seconds for procedures to * finish. * @param timeout Wait at most timeout seconds for procedures to finish. */ public synchronized void shutDownAndWait(int timeout) { shutDown(); while (readerThread.isAlive()) { try { readerThread.join(); } catch (InterruptedException e) { } } while (roosterThread.isAlive()) { try { roosterThread.join(); } catch (InterruptedException e) { } } while (recoverThread.isAlive()) { try { recoverThread.join(); } catch (InterruptedException e) { } } while (!workersPool.isTerminated()) { try { workersPool.awaitTermination(timeout, TimeUnit.SECONDS); } catch (InterruptedException e) { } } }
3.68
hbase_RegionReplicaUtil_getRegionInfoForDefaultReplica
/** * Returns the RegionInfo for the default replicaId (0). RegionInfos correspond to a range of a * table, but more than one "instance" of the same range can be deployed, differentiated * by the replicaId. * @return a RegionInfo object corresponding to the same range (table, start and end key), but * for the default replicaId. */ public static RegionInfo getRegionInfoForDefaultReplica(RegionInfo regionInfo) { return getRegionInfoForReplica(regionInfo, DEFAULT_REPLICA_ID); }
3.68
hbase_ScannerModel_hasStartRow
/** Returns true if a start row was specified */ public boolean hasStartRow() { return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW); }
3.68
hudi_HoodieBigQuerySyncClient_tableNotExistsOrDoesNotMatchSpecification
/** * Checks for the existence of a table that uses the manifest file approach and matches other requirements. * @param tableName name of the table * @return Returns true if the table does not exist or if the table does exist but does not use the manifest file. False otherwise. */ public boolean tableNotExistsOrDoesNotMatchSpecification(String tableName) { TableId tableId = TableId.of(projectId, datasetName, tableName); Table table = bigquery.getTable(tableId); if (table == null || !table.exists()) { return true; } ExternalTableDefinition externalTableDefinition = table.getDefinition(); boolean manifestDoesNotExist = externalTableDefinition.getSourceUris() == null || externalTableDefinition.getSourceUris().stream().noneMatch(uri -> uri.contains(ManifestFileWriter.ABSOLUTE_PATH_MANIFEST_FOLDER_NAME)); if (!StringUtils.isNullOrEmpty(config.getString(BIGQUERY_SYNC_BIG_LAKE_CONNECTION_ID))) { // If bigLakeConnectionId is present and connectionId is not present in table definition, we need to replace the table. return manifestDoesNotExist || externalTableDefinition.getConnectionId() == null; } return manifestDoesNotExist; }
3.68
flink_ListDelimitedSerializer_deserializeNextElement
/** Deserializes a single element from a serialized list. */ public static <T> T deserializeNextElement( DataInputDeserializer in, TypeSerializer<T> elementSerializer) throws IOException { if (in.available() > 0) { T element = elementSerializer.deserialize(in); if (in.available() > 0) { // skip the delimiter byte between elements in.readByte(); } return element; } return null; }
3.68
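A sketch of draining a whole delimited list with the helper above; bytes and stringSerializer are assumed inputs matching the element type:

    DataInputDeserializer in = new DataInputDeserializer(bytes);
    List<String> elements = new ArrayList<>();
    String next;
    // returns null once the input is exhausted
    while ((next = ListDelimitedSerializer.deserializeNextElement(in, stringSerializer)) != null) {
        elements.add(next);
    }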
framework_Escalator_calculateTotalRowHeight
/** * Returns the height of all rows in the row container. */ protected double calculateTotalRowHeight() { return getDefaultRowHeight() * getRowCount(); }
3.68
hadoop_CacheStats_reserve
/** * Try to reserve more bytes. * * @param count * The number of bytes to add. We will round this up to the page * size. * * @return The new number of usedBytes if we succeeded; -1 if we failed. */ long reserve(long count) { return usedBytesCount.reserve(count); }
3.68
open-banking-gateway_PsuEncryptionServiceProvider_generateKeyPair
/** * Generate random key pair. * @return Random key pair. */ public KeyPair generateKeyPair() { return oper.generatePublicPrivateKey(); }
3.68
streampipes_TextBlock_addLabels
/** * Adds a set of labels to this {@link TextBlock}. <code>null</code>-references are silently * ignored. * * @param l The labels to be added. */ public void addLabels(final String... l) { if (l == null) { return; } if (this.labels == null) { this.labels = new HashSet<String>(); } for (final String label : l) { this.labels.add(label); } }
3.68
framework_VListSelect_getSelectedItems
/** * Gets the currently selected item keys. * * @return the currently selected item keys */ protected FastStringSet getSelectedItems() { final FastStringSet selectedItemKeys = FastStringSet.create(); for (int i = 0; i < select.getItemCount(); i++) { if (select.isItemSelected(i)) { String key = select.getValue(i); selectedItemKeys.add(key); } } return selectedItemKeys; }
3.68
framework_SessionInitEvent_getSession
/** * Gets the Vaadin service session that has been initialized. * * @return the Vaadin service session */ public VaadinSession getSession() { return session; }
3.68
flink_ArrowWriter_finish
/** Finishes the writing of the current row batch. */ public void finish() { root.setRowCount(fieldWriters[0].getCount()); for (ArrowFieldWriter<IN> fieldWriter : fieldWriters) { fieldWriter.finish(); } }
3.68
flink_HiveParserSemanticAnalyzer_processPTF
/* * - invoked during FROM AST tree processing, on encountering a PTF invocation. * - tree form is * ^(TOK_PTBLFUNCTION name partitionTableFunctionSource partitioningSpec? arguments*) * - setup a HiveParserPTFInvocationSpec for this top level PTF invocation. */ private void processPTF(HiveParserQB qb, HiveParserASTNode ptf) throws SemanticException { PartitionedTableFunctionSpec ptfSpec = processPTFChain(qb, ptf); if (ptfSpec.getAlias() != null) { qb.addAlias(ptfSpec.getAlias()); } HiveParserPTFInvocationSpec spec = new HiveParserPTFInvocationSpec(); spec.setFunction(ptfSpec); qb.addPTFNodeToSpec(ptf, spec); }
3.68
hbase_OrderedBytes_lengthVaruint64
/** * Inspect {@code src} for an encoded varuint64 for its length in bytes. Preserves the state of * {@code src}. * @param src source buffer * @param comp if true, parse the complement of the value. * @return the number of bytes consumed by this value. */ static int lengthVaruint64(PositionedByteRange src, boolean comp) { int a0 = (comp ? DESCENDING : ASCENDING).apply(src.peek()) & 0xff; if (a0 <= 240) return 1; if (a0 <= 248) return 2; if (a0 == 249) return 3; if (a0 == 250) return 4; if (a0 == 251) return 5; if (a0 == 252) return 6; if (a0 == 253) return 7; if (a0 == 254) return 8; if (a0 == 255) return 9; throw unexpectedHeader(src.peek()); }
3.68
framework_TooltipInfo_setErrorMessage
/** * Sets the error message. * * @param errorMessage * the error message to set */ public void setErrorMessage(String errorMessage) { errorMessageHtml = errorMessage; }
3.68
hmily_TransactionContext_setCoordinator
/** * Sets coordinator. * * @param coordinator the coordinator */ public void setCoordinator(final Coordinator coordinator) { this.coordinator = coordinator; }
3.68
hadoop_RoleModel_effect
/** * Map a bool to an effect. * @param allowed is the statement to allow actions? * @return the appropriate effect. */ public static Effects effect(final boolean allowed) { return allowed ? Effects.Allow : Effects.Deny; }
3.68
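The mapping above is total over booleans; for illustration:

    System.out.println(RoleModel.effect(true));  // Allow
    System.out.println(RoleModel.effect(false)); // Deny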
hbase_BloomFilterFactory_createFromMeta
/** * Instantiates the correct Bloom filter class based on the version provided in the meta block * data. * @param meta the byte array holding the Bloom filter's metadata, including version information * @param reader the {@link HFile} reader to use to lazily load Bloom filter blocks * @return an instance of the correct type of Bloom filter */ public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader) throws IllegalArgumentException, IOException { return createFromMeta(meta, reader, null); }
3.68
hbase_FutureUtils_unwrapCompletionException
/** * Get the cause of the {@link Throwable} if it is a {@link CompletionException}. */ public static Throwable unwrapCompletionException(Throwable error) { if (error instanceof CompletionException) { Throwable cause = error.getCause(); if (cause != null) { return cause; } } return error; }
3.68
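A hedged usage sketch of the unwrap helper inside a CompletableFuture callback; future and LOG are hypothetical names:

    future.whenComplete((result, error) -> {
        if (error != null) {
            // strip the CompletionException wrapper before logging
            Throwable cause = FutureUtils.unwrapCompletionException(error);
            LOG.error("operation failed", cause);
        }
    });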
hadoop_IdentifierResolver_setOutputValueClass
/** * Sets the output value class. */ protected void setOutputValueClass(Class outputValueClass) { this.outputValueClass = outputValueClass; }
3.68
hudi_ExternalFilePathUtil_appendCommitTimeAndExternalFileMarker
/** * Appends the commit time and external file marker to the file path. Hudi relies on the commit time in the file name for properly generating views of the files in a table. * @param filePath The original file path * @param commitTime The time of the commit that added this file to the table * @return The file path with this additional information appended */ public static String appendCommitTimeAndExternalFileMarker(String filePath, String commitTime) { return filePath + "_" + commitTime + EXTERNAL_FILE_SUFFIX; }
3.68
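For illustration (the literal value of EXTERNAL_FILE_SUFFIX is not shown in the snippet, so the tail of the result is left symbolic):

    String marked = ExternalFilePathUtil.appendCommitTimeAndExternalFileMarker("data.parquet", "20230801120000");
    // marked == "data.parquet_20230801120000" + EXTERNAL_FILE_SUFFIX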
hbase_StorageClusterStatusModel_getLiveNode
/** * @param index the index * @return the region server model */ public Node getLiveNode(int index) { return liveNodes.get(index); }
3.68
hbase_ReplicationSourceManager_getWALFilesToReplicate
// sorted from oldest to newest private PriorityQueue<Path> getWALFilesToReplicate(ServerName sourceRS, boolean syncUp, Map<String, ReplicationGroupOffset> offsets) throws IOException { List<Path> walFiles = AbstractFSWALProvider.getArchivedWALFiles(conf, sourceRS, URLEncoder.encode(sourceRS.toString(), StandardCharsets.UTF_8.name())); if (syncUp) { // we also need to list WALs directory for ReplicationSyncUp walFiles.addAll(AbstractFSWALProvider.getWALFiles(conf, sourceRS)); } PriorityQueue<Path> walFilesPQ = new PriorityQueue<>(AbstractFSWALProvider.TIMESTAMP_COMPARATOR); // sort the wal files and also filter out replicated files for (Path file : walFiles) { String walGroupId = AbstractFSWALProvider.getWALPrefixFromWALName(file.getName()); ReplicationGroupOffset groupOffset = offsets.get(walGroupId); if (shouldReplicate(groupOffset, file.getName())) { walFilesPQ.add(file); } else { LOG.debug("Skip enqueuing log {} because it is before the start offset {}", file.getName(), groupOffset); } } return walFilesPQ; }
3.68
framework_AbstractColorPicker_getTextfieldVisibility
/** * Gets the visibility of CSS color code text field. * * @since 7.5.0 * @return visibility of css color code text field */ public boolean getTextfieldVisibility() { return textfieldVisible; }
3.68
hadoop_MountdBase_startUDPServer
/* Start UDP server */ private void startUDPServer() { udpServer = new SimpleUdpServer(rpcProgram.getPort(), rpcProgram, 1); rpcProgram.startDaemons(); try { udpServer.run(); } catch (Throwable e) { LOG.error("Failed to start the UDP server.", e); if (udpServer.getBoundPort() > 0) { rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP, udpServer.getBoundPort()); } udpServer.shutdown(); terminate(1, e); } udpBoundPort = udpServer.getBoundPort(); }
3.68
framework_MessageSender_send
/** * Sends an asynchronous or synchronous UIDL request to the server using the * given URI. * * @param payload * The contents of the request to send */ public void send(final JsonObject payload) { if (push != null && push.isBidirectional()) { push.push(payload); } else { xhrConnection.send(payload); } }
3.68
flink_TableConfigUtils_getCalciteConfig
/** * Returns the {@link CalciteConfig} wrapped in the given TableConfig. * * @param tableConfig TableConfig object * @return wrapped CalciteConfig. */ public static CalciteConfig getCalciteConfig(TableConfig tableConfig) { return tableConfig .getPlannerConfig() .unwrap(CalciteConfig.class) .orElse(CalciteConfig$.MODULE$.DEFAULT()); }
3.68
hbase_HFileBlock_cloneUncompressedBufferWithHeader
/** * Clones the header followed by the uncompressed data, even if using compression. This is * needed for storing uncompressed blocks in the block cache. Can be called in the "writing" * state or the "block ready" state. Returns only the header and data, does not include checksum * data. * @return Returns an uncompressed block ByteBuff for caching on write */ ByteBuff cloneUncompressedBufferWithHeader() { expectState(State.BLOCK_READY); ByteBuff bytebuff = allocator.allocate(baosInMemory.size()); baosInMemory.toByteBuff(bytebuff); int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), fileContext.getBytesPerChecksum()); putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, baosInMemory.size(), onDiskBlockBytesWithHeader.size()); bytebuff.rewind(); return bytebuff; }
3.68
framework_AbstractComponent_setWidthUndefined
/* * (non-Javadoc) * * @see com.vaadin.server.Sizeable#setWidthUndefined() */ @Override public void setWidthUndefined() { setWidth(-1, Unit.PIXELS); }
3.68
flink_FileMergingSnapshotManagerBase_deletePhysicalFile
/** * Delete a physical file by given file path. Use the io executor to do the deletion. * * @param filePath the given file path to delete. */ protected final void deletePhysicalFile(Path filePath) { ioExecutor.execute( () -> { try { fs.delete(filePath, false); LOG.debug("Physical file deleted: {}.", filePath); } catch (IOException e) { LOG.warn("Failed to delete file: {}", filePath); } }); }
3.68
hbase_MetricsSource_clear
/** Removes all metrics about this Source. */ public void clear() { terminate(); singleSourceSource.clear(); }
3.68
framework_EditorConnector_handleServerInitiated
/** * Used to handle the case where the editor calls us because it was * invoked by the server via RPC and not by the client. In that case, * the request can be simply synchronously completed. * * @param request * the request object * @return true if the request was originally triggered by the server, * false otherwise */ private boolean handleServerInitiated(EditorRequest<?> request) { assert request != null : "Cannot handle null request"; assert currentRequest == null : "Earlier request not yet finished"; if (serverInitiated) { serverInitiated = false; request.success(); return true; } else { return false; } }
3.68
druid_DruidAbstractDataSource_getQueryTimeout
/** * Retrieves the number of seconds the driver will wait for a <code>Statement</code> object to execute. If the limit * is exceeded, a <code>SQLException</code> is thrown. * * @return the current query timeout limit in seconds; zero means there is no limit * @see #setQueryTimeout */ public int getQueryTimeout() { return queryTimeout; }
3.68
Activiti_BooleanToString_primTransform
/** * {@inheritDoc} */ @Override protected Object primTransform(Object anObject) throws Exception { return ((Boolean) anObject).toString(); }
3.68
morf_UnionSetOperator_drive
/** * @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser) */ @Override public void drive(ObjectTreeTraverser traverser) { traverser.dispatch(getSelectStatement()); }
3.68
pulsar_FunctionCacheManager_registerFunction
/** * Registers a function with its required jar files and classpaths. * * <p>The jar files are identified by their blob keys and downloaded for * use by a {@link ClassLoader}. * * @param fid function id * @param requiredJarFiles collection of blob keys identifying the required jar files. * @param requiredClasspaths collection of classpaths that are added to the function code class loader. */ default void registerFunction(String fid, List<String> requiredJarFiles, List<URL> requiredClasspaths) throws IOException { registerFunctionInstance(fid, null, requiredJarFiles, requiredClasspaths); }
3.68
morf_AbstractSqlDialectTest_testSelectOrderByNullsLastScript
/** * Tests a select with an "order by" clause with nulls last and default direction. */ @Test public void testSelectOrderByNullsLastScript() { FieldReference fieldReference = new FieldReference(STRING_FIELD); SelectStatement stmt = new SelectStatement(fieldReference) .from(new TableReference(ALTERNATE_TABLE)) .orderBy(fieldReference.nullsLast()); assertEquals("Select with order by", expectedSelectOrderByNullsLast(), testDialect.convertStatementToSQL(stmt)); }
3.68
flink_StringValueUtils_replaceNonWordChars
/** * Replaces all non-word characters in a string by a given character. The only characters not * replaced are the characters that qualify as word characters or digit characters with respect * to {@link Character#isLetter(char)} or {@link Character#isDigit(char)}, as well as the * underscore character. * * <p>This operation is intended to simplify strings for counting distinct words. * * @param string The string value to have the non-word characters replaced. * @param replacement The character to use as the replacement. */ public static void replaceNonWordChars(StringValue string, char replacement) { final char[] chars = string.getCharArray(); final int len = string.length(); for (int i = 0; i < len; i++) { final char c = chars[i]; if (!(Character.isLetter(c) || Character.isDigit(c) || c == '_')) { chars[i] = replacement; } } }
3.68
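A worked example of the replacement above: letters, digits, and underscores survive; the comma, space, and exclamation mark are replaced.

    StringValue value = new StringValue("hello, world_42!");
    StringValueUtils.replaceNonWordChars(value, '*');
    System.out.println(value); // hello**world_42*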
framework_UidlRequestHandler_openJsonMessage
/** * Writes the opening of JSON message to be sent to client. * * @param outWriter * @param response * @throws IOException */ protected void openJsonMessage(Writer outWriter, VaadinResponse response) throws IOException { // method body was missing from the original snippet; a minimal sketch (assumed): // the "for(;;);" prefix guards the JSON payload against cross-site script inclusion outWriter.write("for(;;);[{"); }
3.68
hadoop_STSClientFactory_builder
/** * Create the builder ready for any final configuration options. * Picks up connection settings from the Hadoop configuration, including * proxy secrets. * @param credentials AWS credential chain to use * @param conf AWS configuration. * @param stsEndpoint optional endpoint "https://sts.us-west-1.amazonaws.com" * @param stsRegion the region, e.g "us-west-1". Must be set if endpoint is. * @param bucket bucket name * @return the builder to call {@code build()} * @throws IOException problem reading proxy secrets */ public static StsClientBuilder builder(final AwsCredentialsProvider credentials, final Configuration conf, final String stsEndpoint, final String stsRegion, final String bucket) throws IOException { final StsClientBuilder stsClientBuilder = StsClient.builder(); Preconditions.checkArgument(credentials != null, "No credentials"); final ClientOverrideConfiguration.Builder clientOverrideConfigBuilder = AWSClientConfig.createClientConfigBuilder(conf, AWS_SERVICE_IDENTIFIER_STS); final ApacheHttpClient.Builder httpClientBuilder = AWSClientConfig.createHttpClientBuilder(conf); final RetryPolicy.Builder retryPolicyBuilder = AWSClientConfig.createRetryPolicyBuilder(conf); final ProxyConfiguration proxyConfig = AWSClientConfig.createProxyConfiguration(conf, bucket); clientOverrideConfigBuilder.retryPolicy(retryPolicyBuilder.build()); httpClientBuilder.proxyConfiguration(proxyConfig); stsClientBuilder.httpClientBuilder(httpClientBuilder) .overrideConfiguration(clientOverrideConfigBuilder.build()) .credentialsProvider(credentials); boolean destIsStandardEndpoint = STS_STANDARD.equals(stsEndpoint); if (isNotEmpty(stsEndpoint) && !destIsStandardEndpoint) { Preconditions.checkArgument(isNotEmpty(stsRegion), "STS endpoint is set to %s but no signing region was provided", stsEndpoint); LOG.debug("STS Endpoint={}; region='{}'", stsEndpoint, stsRegion); stsClientBuilder.endpointOverride(getSTSEndpoint(stsEndpoint)).region(Region.of(stsRegion)); } else { Preconditions.checkArgument(isEmpty(stsRegion), "STS signing region set to %s but no STS endpoint specified", stsRegion); } return stsClientBuilder; }
3.68