name: string (lengths 12–178)
code_snippet: string (lengths 8–36.5k)
score: float64 (range 3.26–3.68)
aws-saas-boost_KeycloakApi_toKeycloakUser
// VisibleForTesting static UserRepresentation toKeycloakUser(Map<String, Object> user) { if (user != null) { final UserRepresentation keycloakUser = new UserRepresentation(); // for each "set" function in UserRepresentation, parse the mapped value from user Arrays.stream(UserRepresentation.class.getMethods()) .filter(method -> method.getName().toLowerCase().startsWith("set")) .forEach(setMethod -> { try { String capitalizedAttributeName = setMethod.getName().substring("set".length()); // we need to "lowercase" the first character when we pull the attribute name this way char[] cs = capitalizedAttributeName.toCharArray(); cs[0] = Character.toLowerCase(cs[0]); String attributeName = new String(cs); Class attributeType = setMethod.getParameterTypes()[0]; if (user.containsKey(attributeName)) { if (attributeType == Long.class) { // createdTimestamp is parsed by Jackson as an Integer // but stored in UserRepresentation as a Long setMethod.invoke(keycloakUser, Long.parseLong(user.get(attributeName).toString())); } else if (attributeType == Set.class) { // disableableCredentialTypes is parsed by Jackson as an // ArrayList<String> but stored in UserRepresentation as a Set<String> setMethod.invoke(keycloakUser, new HashSet<String>((ArrayList<String>) user.get(attributeName))); } else { setMethod.invoke(keycloakUser, attributeType.cast(user.get(attributeName))); } } else { LOGGER.info("User JSON map does not contain {}, skipping.", attributeName); } } catch (Exception e) { LOGGER.error("Error converting user map to keycloak user"); LOGGER.error(Utils.getFullStackTrace(e)); throw new RuntimeException(e); } }); return keycloakUser; } return null; }
3.68
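A minimal sketch of exercising this reflective mapper; the map contents below are illustrative, not from the source:
Map<String, Object> user = new HashMap<>();
user.put("username", "alice");
user.put("enabled", Boolean.TRUE);
user.put("createdTimestamp", 1700000000000L); // arrives as a Number; the setter loop converts it to Long
UserRepresentation rep = toKeycloakUser(user); // static and VisibleForTesting, so callable from a test
// rep.getUsername() -> "alice"; keys with no matching setter are skipped with an INFO log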
framework_IndexedContainer_firePropertyValueChange
/** * Sends a Property value change event to all interested listeners. * * @param source * the IndexedContainerProperty object. */ private void firePropertyValueChange(IndexedContainerProperty source) { // Sends event to listeners listening all value changes if (propertyValueChangeListeners != null) { final Property.ValueChangeEvent event = new IndexedContainer.PropertyValueChangeEvent( source); for (Object l : propertyValueChangeListeners.toArray()) { ((Property.ValueChangeListener) l).valueChange(event); } } // Sends event to single property value change listeners if (singlePropertyValueChangeListeners != null) { final Map<Object, List<Property.ValueChangeListener>> propertySetToListenerListMap = singlePropertyValueChangeListeners .get(source.propertyId); if (propertySetToListenerListMap != null) { final List<Property.ValueChangeListener> listenerList = propertySetToListenerListMap .get(source.itemId); if (listenerList != null) { final Property.ValueChangeEvent event = new IndexedContainer.PropertyValueChangeEvent( source); for (Object listener : listenerList.toArray()) { ((Property.ValueChangeListener) listener) .valueChange(event); } } } } }
3.68
morf_ChangelogBuilder_withUpgradeSteps
/** * Add to the collection of {@link UpgradeStep}s to include in this changelog. * * @param upgradeSteps The upgrade steps to add * @return This builder for chaining */ public ChangelogBuilder withUpgradeSteps(Collection<Class<? extends UpgradeStep>> upgradeSteps) { this.upgradeSteps.addAll(upgradeSteps); return this; }
3.68
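Because the method delegates to addAll and returns this, repeated calls accumulate steps; a sketch with hypothetical variable names:
builder
    .withUpgradeSteps(coreSteps)     // Collection<Class<? extends UpgradeStep>>
    .withUpgradeSteps(moduleSteps);  // additive: both collections end up in the changelog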
hbase_SnapshotManager_isSnapshotDone
/** * Check if the specified snapshot is done. * @return true if snapshot is ready to be restored, false if it is still being taken. * @throws IOException if there is an error from HDFS or RPC * @throws UnknownSnapshotException if snapshot is invalid or does not exist. */ public boolean isSnapshotDone(SnapshotDescription expected) throws IOException { // check the request to make sure it has a snapshot if (expected == null) { throw new UnknownSnapshotException( "No snapshot name passed in request, can't figure out which snapshot you want to check."); } Long procId = snapshotToProcIdMap.get(expected); if (procId != null) { if (master.getMasterProcedureExecutor().isRunning()) { return master.getMasterProcedureExecutor().isFinished(procId); } else { return false; } } String ssString = ClientSnapshotDescriptionUtils.toString(expected); // check to see if the sentinel exists, and if the task is complete, removes it from the in-progress snapshots map. SnapshotSentinel handler = removeSentinelIfFinished(this.snapshotHandlers, expected); // stop tracking "abandoned" handlers cleanupSentinels(); if (handler == null) { // If there's no handler in the in-progress map, it means one of the following: // - someone has already requested the snapshot state // - the requested snapshot was completed a long time ago (cleanupSentinels() timeout) // - the snapshot was never requested // In those cases, return the "done" state to the user if the snapshot exists on disk; // otherwise raise an exception saying that the snapshot is not running and doesn't exist. if (!isSnapshotCompleted(expected)) { throw new UnknownSnapshotException("Snapshot " + ssString + " is not currently running or one of the known completed snapshots."); } // the snapshot was done return true; } // pass on any failure we find in the sentinel try { handler.rethrowExceptionIfFailed(); } catch (ForeignException e) { // Give some procedure info on an exception. String status; Procedure p = coordinator.getProcedure(expected.getName()); if (p != null) { status = p.getStatus(); } else { status = expected.getName() + " not found in proclist " + coordinator.getProcedureNames(); } throw new HBaseSnapshotException("Snapshot " + ssString + " had an error. " + status, e, ProtobufUtil.createSnapshotDesc(expected)); } // check to see if we are done if (handler.isFinished()) { LOG.debug("Snapshot '" + ssString + "' has completed, notifying client."); return true; } else if (LOG.isDebugEnabled()) { LOG.debug("Snapshotting '" + ssString + "' is still in progress!"); } return false; }
3.68
morf_SqlServerDialect_getInternalColumnRepresentation
/** * Gets the underlying column representation (e.g. without any COLLATE statements). * * @param dataType the column datatype. * @param width the column width. * @param scale the column scale. * @return a string representation of the column definition. */ private String getInternalColumnRepresentation(DataType dataType, int width, int scale) { switch (dataType) { case STRING: return String.format("NVARCHAR(%d)", width); case DECIMAL: return String.format("NUMERIC(%d,%d)", width, scale); case DATE: return "DATE"; case BOOLEAN: return "BIT"; case BIG_INTEGER: return "BIGINT"; case INTEGER: return "INTEGER"; case BLOB: return "IMAGE"; case CLOB: return "NVARCHAR(MAX)"; default: throw new UnsupportedOperationException("Cannot map column with type [" + dataType + "]"); } }
3.68
morf_ExceptSetOperator_equals
/** * @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; ExceptSetOperator other = (ExceptSetOperator) obj; if (selectStatement == null) { if (other.selectStatement != null) return false; } else if (!selectStatement.equals(other.selectStatement)) return false; return true; }
3.68
framework_Button_isMetaKey
/** * Checks if the Meta key was down when the mouse event took place. * * @return true if Meta was pressed when the event occurred, false * otherwise or if unknown */ public boolean isMetaKey() { if (null != details) { return details.isMetaKey(); } else { return false; } }
3.68
framework_ListSelectElement_deselectByText
/** * Deselects the option(s) with the given text. * * @param text * the text of the option */ public void deselectByText(String text) { if (isReadOnly()) { throw new ReadOnlyException(); } select.deselectByVisibleText(text); if (isPhantomJS() && select.isMultiple()) { // Phantom JS does not fire a change event when // selecting/deselecting items in a multi select fireChangeEvent(selectElement); } }
3.68
framework_Calendar_setEventProvider
/** * Set the {@link CalendarEventProvider} to be used with this calendar. The * EventProvider is used to query for events to show, and must be non-null. * By default a {@link BasicEventProvider} is used. * * @param calendarEventProvider * the calendarEventProvider to set. Cannot be null. */ public void setEventProvider(CalendarEventProvider calendarEventProvider) { if (calendarEventProvider == null) { throw new IllegalArgumentException( "Calendar event provider cannot be null"); } // remove old listener if (getEventProvider() instanceof EventSetChangeNotifier) { ((EventSetChangeNotifier) getEventProvider()) .removeEventSetChangeListener(this); } this.calendarEventProvider = calendarEventProvider; // add new listener if (calendarEventProvider instanceof EventSetChangeNotifier) { ((EventSetChangeNotifier) calendarEventProvider) .addEventSetChangeListener(this); } }
3.68
hudi_FailSafeConsistencyGuard_getFilesWithoutSchemeAndAuthority
/** * Generate file names without scheme and authority. * * @param files list of files of interest. * @return the filenames without scheme and authority. */ protected List<String> getFilesWithoutSchemeAndAuthority(List<String> files) { return files.stream().map(f -> Path.getPathWithoutSchemeAndAuthority(new Path(f))).map(Path::toString) .collect(Collectors.toList()); }
3.68
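Hadoop's Path.getPathWithoutSchemeAndAuthority drops the scheme/authority prefix; a sketch with hypothetical values, calling the protected method as a subclass or test would:
List<String> in = Arrays.asList("hdfs://nn:8020/tbl/f1.parquet", "/tbl/f2.parquet");
List<String> out = guard.getFilesWithoutSchemeAndAuthority(in); // guard: a FailSafeConsistencyGuard subclass instance
// out -> ["/tbl/f1.parquet", "/tbl/f2.parquet"]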
flink_TopNBuffer_entrySet
/** Returns a {@link Set} view of the mappings contained in the buffer. */ public Set<Map.Entry<RowData, Collection<RowData>>> entrySet() { return treeMap.entrySet(); }
3.68
framework_DropTargetExtension_setDropCriteriaScript
/** * Sets a criteria script in JavaScript to allow drop on this drop target. * The script is executed when something is dragged on top of the target, * and the drop is not allowed in case the script returns {@code false}. * <p> * Drop will be allowed if it passes both this criteria script and the * criteria set via any of {@code setDropCriterion()} or {@code * setDropCriteria()} methods. If no criteria is set, then the drop is * always accepted, if the set {@link #setDropEffect(DropEffect) dropEffect} * matches the drag source. * <p> * <b>IMPORTANT:</b> Construct the criteria script carefully and do not * include untrusted sources such as user input. Always keep in mind that * the script is executed on the client as is. * <p> * Example: * * <pre> * target.setDropCriterion( * // If dragged source contains a URL, allow it to be dropped * "if (event.dataTransfer.types.includes('text/uri-list')) {" * + " return true;" + "}" + * * // Otherwise cancel the event * "return false;"); * </pre> * * @param criteriaScript * JavaScript to be executed when drop event happens or * {@code null} to clear. */ public void setDropCriteriaScript(String criteriaScript) { if (!Objects.equals(getState(false).criteriaScript, criteriaScript)) { getState().criteriaScript = criteriaScript; } }
3.68
hadoop_FilterFileSystem_listStatusIterator
/** Return a remote iterator for listing in a directory */ @Override public RemoteIterator<FileStatus> listStatusIterator(Path f) throws IOException { return fs.listStatusIterator(f); }
3.68
morf_AbstractSqlDialectTest_testCastToString
/** * Tests the output of a cast to a string. */ @Test public void testCastToString() { String result = testDialect.getSqlFrom(new Cast(new FieldReference("value"), DataType.STRING, 10)); assertEquals(expectedStringCast(), result); }
3.68
hadoop_MutableQuantiles_setNumInfo
/** * Set info about the metrics. * * @param pNumInfo info about the metrics. */ public synchronized void setNumInfo(MetricsInfo pNumInfo) { this.numInfo = pNumInfo; }
3.68
morf_AbstractSqlDialectTest_testAddStringColumnWithDefault
/** * Test adding a string column with a default value. */ @Test public void testAddStringColumnWithDefault() { testAlterTableColumn(AlterationType.ADD, column("stringField_with_default", DataType.STRING, 6).defaultValue("N"), expectedAlterTableAddStringColumnWithDefaultStatement()); }
3.68
streampipes_AbstractConfigurablePipelineElementBuilder_requiredCodeblock
/** * Assigns a new code block parameter (without a specific language) which is required * by the processing element. * * @param label The {@link org.apache.streampipes.sdk.helpers.Label} that describes why this parameter is needed in a * user-friendly manner. * @return this */ public K requiredCodeblock(Label label) { return requiredCodeblock(label, CodeLanguage.None); }
3.68
rocketmq-connect_AbstractConfigManagementService_processConnectorConfigRecord
/** * Process a connector config record. * * @param connectorName the connector name * @param schemaAndValue the record's schema and value */ private void processConnectorConfigRecord(String connectorName, SchemaAndValue schemaAndValue) { if (mergeConnectConfig(connectorName, schemaAndValue)) { // rebalance for connector triggerListener(); } }
3.68
hadoop_StoragePolicySatisfyManager_verifyOutstandingPathQLimit
/** * Verify that the satisfier queue has not exceeded the allowed outstanding limit. * @throws IOException if the outstanding queue limit is exceeded */ public void verifyOutstandingPathQLimit() throws IOException { long size = pathsToBeTraversed.size(); // Check whether the SPS call Q exceeds the allowed limit. if (outstandingPathsLimit - size <= 0) { LOG.debug("Satisfier Q - outstanding limit:{}, current size:{}", outstandingPathsLimit, size); throw new IOException("Outstanding satisfier queue limit: " + outstandingPathsLimit + " exceeded, try later!"); } }
3.68
flink_FutureUtils_completedExceptionally
/** * Returns an exceptionally completed {@link CompletableFuture}. * * @param cause to complete the future with * @param <T> type of the future * @return An exceptionally completed CompletableFuture */ public static <T> CompletableFuture<T> completedExceptionally(Throwable cause) { CompletableFuture<T> result = new CompletableFuture<>(); result.completeExceptionally(cause); return result; }
3.68
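A minimal sketch of consuming the returned future:
CompletableFuture<String> failed =
    FutureUtils.completedExceptionally(new IllegalStateException("boom"));
failed.whenComplete((value, error) -> {
    // value is null; error is the IllegalStateException passed above
});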
hbase_Procedure_updateMetricsOnFinish
/** * This function will be called just after procedure execution is finished. Override this method * to update metrics at the end of the procedure. If {@link #getProcedureMetrics(Object)} returns * non-null {@link ProcedureMetrics}, the default implementation adds runtime of a procedure to a * time histogram for successfully completed procedures. Increments failed counter for failed * procedures. * <p/> * TODO: As any of the sub-procedures on failure rolls back all procedures in the stack, including * successfully finished siblings, this function may get called twice in certain cases for certain * procedures. Explore further if this can be called once. * @param env The environment passed to the procedure executor * @param runtime Runtime of the procedure in milliseconds * @param success true if procedure is completed successfully */ protected void updateMetricsOnFinish(TEnvironment env, long runtime, boolean success) { ProcedureMetrics metrics = getProcedureMetrics(env); if (metrics == null) { return; } if (success) { Histogram timeHisto = metrics.getTimeHisto(); if (timeHisto != null) { timeHisto.update(runtime); } } else { Counter failedCounter = metrics.getFailedCounter(); if (failedCounter != null) { failedCounter.increment(); } } }
3.68
flink_StrategyUtils_findDataType
/** * Finds a data type that is close to the given data type in terms of nullability and conversion * class but of the given logical root. */ static Optional<DataType> findDataType( CallContext callContext, boolean throwOnFailure, DataType actualDataType, LogicalTypeRoot expectedRoot, @Nullable Boolean expectedNullability) { final LogicalType actualType = actualDataType.getLogicalType(); return Optional.ofNullable(findDataTypeOfRoot(actualDataType, expectedRoot)) // set nullability .map( newDataType -> { if (Objects.equals(expectedNullability, Boolean.TRUE)) { return newDataType.nullable(); } else if (Objects.equals(expectedNullability, Boolean.FALSE)) { return newDataType.notNull(); } else if (actualType.isNullable()) { return newDataType.nullable(); } return newDataType.notNull(); }) // preserve bridging class if possible .map( newDataType -> { final Class<?> clazz = actualDataType.getConversionClass(); final LogicalType newType = newDataType.getLogicalType(); if (newType.supportsOutputConversion(clazz)) { return newDataType.bridgedTo(clazz); } return newDataType; }) // check if type can be implicitly casted .filter( newDataType -> { if (supportsImplicitCast(actualType, newDataType.getLogicalType())) { return true; } if (throwOnFailure) { throw callContext.newValidationError( "Unsupported argument type. Expected type root '%s' but actual type was '%s'.", expectedRoot, actualType); } return false; }); }
3.68
flink_StateAssignmentOperation_extractIntersectingState
/** * Extracts certain key group ranges from the given state handles and adds them to the * collector. */ @VisibleForTesting public static void extractIntersectingState( Collection<? extends KeyedStateHandle> originalSubtaskStateHandles, KeyGroupRange rangeToExtract, List<KeyedStateHandle> extractedStateCollector) { for (KeyedStateHandle keyedStateHandle : originalSubtaskStateHandles) { if (keyedStateHandle != null) { KeyedStateHandle intersectedKeyedStateHandle = keyedStateHandle.getIntersection(rangeToExtract); if (intersectedKeyedStateHandle != null) { extractedStateCollector.add(intersectedKeyedStateHandle); } } } }
3.68
hbase_AbstractProtobufWALReader_getCodecClsName
/** * Returns the cell codec classname */ public String getCodecClsName() { return codecClsName; }
3.68
framework_VFilterSelect_selectNextPage
/* * Show the next page. */ private void selectNextPage() { if (hasNextPage()) { filterOptions(currentPage + 1, lastFilter); selectPopupItemWhenResponseIsReceived = Select.FIRST; } }
3.68
hadoop_ListResultEntrySchema_withName
/** * Set the name value. * * @param name the name value to set * @return the ListResultEntrySchema object itself. */ public ListResultEntrySchema withName(String name) { this.name = name; return this; }
3.68
querydsl_AbstractJPASQLQuery_getSingleResult
/** * Transforms results using FactoryExpression if ResultTransformer can't be used * * @param query query * @return single result */ @Nullable private Object getSingleResult(Query query) { if (projection != null) { Object result = query.getSingleResult(); if (result != null) { if (!result.getClass().isArray()) { result = new Object[]{result}; } return projection.newInstance((Object[]) result); } else { return null; } } else { return query.getSingleResult(); } }
3.68
hmily_MetricsReporter_registerGauge
/** * Register gauge. * * @param name name * @param document document for gauge */ public static void registerGauge(final String name, final String document) { registerGauge(name, null, document); }
3.68
morf_DataSetHomology_getDifferences
/** * @return The list of differences detected by the comparison. */ public List<String> getDifferences() { return differences; }
3.68
open-banking-gateway_FinTechTokenService_validate
/** * Service to check the XSRF token. * * @param fintechToken the token to validate * @return true, if token is valid. * In this demo, every token whose length is exactly 16 is valid. */ public boolean validate(String fintechToken) { return fintechToken != null && fintechToken.length() == TOKEN_LENGTH; }
3.68
flink_MetricGroup_addGroup
/** * Creates a new MetricGroup and adds it to this groups sub-groups. * * @param name name of the group * @return the created group */ default MetricGroup addGroup(int name) { return addGroup(String.valueOf(name)); }
3.68
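The int overload simply stringifies the name; taskGroup and subtaskIndex below are placeholders:
MetricGroup subtask = taskGroup.addGroup(subtaskIndex); // equivalent to taskGroup.addGroup(String.valueOf(subtaskIndex))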
hadoop_HttpFSServerWebApp_destroy
/** * Shuts down all running services. */ @Override public void destroy() { SERVER = null; if (metrics != null) { metrics.shutdown(); } super.destroy(); }
3.68
graphhopper_LandmarkStorage_setAreaIndex
/** * This method specifies the polygons which should be used to split the world wide area to improve performance and * quality in this scenario. */ public void setAreaIndex(AreaIndex<SplitArea> areaIndex) { this.areaIndex = areaIndex; }
3.68
flink_JobGraph_addVertex
/** * Adds a new task vertex to the job graph if it is not already included. * * @param vertex the new task vertex to be added */ public void addVertex(JobVertex vertex) { final JobVertexID id = vertex.getID(); JobVertex previous = taskVertices.put(id, vertex); // if we had a prior association, restore and throw an exception if (previous != null) { taskVertices.put(id, previous); throw new IllegalArgumentException( "The JobGraph already contains a vertex with that id."); } }
3.68
hadoop_WriteOperationHelper_abortMultipartUploadsUnderPath
/** * Abort multipart uploads under a path: limited to the first * few hundred. * @param prefix prefix for uploads to abort * @return a count of aborts * @throws IOException trouble; FileNotFoundExceptions are swallowed. */ @Retries.RetryTranslated public int abortMultipartUploadsUnderPath(String prefix) throws IOException { LOG.debug("Aborting multipart uploads under {}", prefix); int count = 0; List<MultipartUpload> multipartUploads = listMultipartUploads(prefix); LOG.debug("Number of outstanding uploads: {}", multipartUploads.size()); for (MultipartUpload upload: multipartUploads) { try { abortMultipartUpload(upload); count++; } catch (FileNotFoundException e) { LOG.debug("Already aborted: {}", upload.key(), e); } } return count; }
3.68
framework_ListSelectElement_clear
/** * Clear operation is not supported for List Select. This operation has no * effect on List Select element. */ @Override public void clear() { super.clear(); }
3.68
hbase_TraceUtil_tracedFutures
/** * Trace an asynchronous operation, and finish the created {@link Span} when all the given * {@code futures} are completed. */ public static <T> List<CompletableFuture<T>> tracedFutures(Supplier<List<CompletableFuture<T>>> action, Supplier<Span> spanSupplier) { Span span = spanSupplier.get(); try (Scope ignored = span.makeCurrent()) { List<CompletableFuture<T>> futures = action.get(); endSpan(CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])), span); return futures; } }
3.68
hbase_QuotaCache_getUserLimiter
/** * Returns the limiter associated to the specified user/table. * @param ugi the user to limit * @param table the table to limit * @return the limiter associated to the specified user/table */ public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableName table) { if (table.isSystemTable()) { return NoopQuotaLimiter.get(); } return getUserQuotaState(ugi).getTableLimiter(table); }
3.68
hbase_ImmutableBytesWritable_get
/** * Get the data from the BytesWritable. * @return The data is only valid between offset and offset+length. */ public byte[] get() { if (this.bytes == null) { throw new IllegalStateException( "Uninitialized. Null constructor " + "called w/o accompanying readFields invocation"); } return this.bytes; }
3.68
pulsar_LedgerMetadataUtils_buildMetadataForCompactedLedger
/** * Build additional metadata for a CompactedLedger. * * @param compactedTopic reference to the compacted topic. * @param compactedToMessageId last messageId. * @return an immutable map which describes the compacted ledger */ public static Map<String, byte[]> buildMetadataForCompactedLedger(String compactedTopic, byte[] compactedToMessageId) { return Map.of( METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR, METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_COMPACTED_LEDGER, METADATA_PROPERTY_COMPACTEDTOPIC, compactedTopic.getBytes(StandardCharsets.UTF_8), METADATA_PROPERTY_COMPACTEDTO, compactedToMessageId ); }
3.68
hbase_MiniBatchOperationInProgress_addOperationsFromCP
/** * Add more Mutations corresponding to the Mutation at the given index to be committed atomically * in the same batch. These mutations are applied to the WAL and applied to the memstore as well. * The timestamp of the cells in the given Mutations MUST be obtained from the original mutation. * <b>Note:</b> The durability from CP will be replaced by the durability of corresponding * mutation. <b>Note:</b> Currently only supports Put and Delete operations. * @param index the index that corresponds to the original mutation index in the batch * @param newOperations the Mutations to add */ public void addOperationsFromCP(int index, Mutation[] newOperations) { if (this.operationsFromCoprocessors == null) { // lazy allocation to save on object allocation in case this is not used this.operationsFromCoprocessors = new Mutation[operations.length][]; } this.operationsFromCoprocessors[getAbsoluteIndex(index)] = newOperations; }
3.68
flink_Tuple21_of
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static < T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> Tuple21< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> of( T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20) { return new Tuple21<>( f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20); }
3.68
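The inference benefit the Javadoc describes, shown with the smaller Tuple3 from the same family:
Tuple3<Integer, Double, String> t = Tuple3.of(1, 2.0, "three");      // type arguments inferred from the values
Tuple3<Integer, Double, String> u = new Tuple3<>(1, 2.0, "three");   // the equivalent explicit constructor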
hudi_HoodieAvroUtils_toJavaDate
/** * Converts days since the epoch to a {@link java.sql.Date}. * <p> * NOTE: This method should only be used in tests. * * @VisibleForTesting */ public static java.sql.Date toJavaDate(int days) { LocalDate date = LocalDate.ofEpochDay(days); ZoneId defaultZoneId = ZoneId.systemDefault(); ZonedDateTime zonedDateTime = date.atStartOfDay(defaultZoneId); return new java.sql.Date(zonedDateTime.toInstant().toEpochMilli()); }
3.68
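Epoch day 19000 falls on 2022-01-08, so (with the calendar date read in the JVM's default zone):
java.sql.Date d = HoodieAvroUtils.toJavaDate(19_000); // -> 2022-01-08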
framework_VTabsheetBase_isTabCaptionsAsHtml
/** * Checks whether captions are rendered as HTML * * The default is false, i.e. render tab captions as plain text * * @since 7.4 * @return true if the captions are rendered as HTML, false if rendered as * plain text */ public boolean isTabCaptionsAsHtml() { return tabCaptionsAsHtml; }
3.68
dubbo_ClassHelper_getClassLoader
/** * Return the default ClassLoader to use: typically the thread context * ClassLoader, if available; the ClassLoader that loaded the ClassUtils * class will be used as fallback. * <p> * Call this method if you intend to use the thread context ClassLoader in a * scenario where you absolutely need a non-null ClassLoader reference: for * example, for class path resource loading (but not necessarily for * <code>Class.forName</code>, which accepts a <code>null</code> ClassLoader * reference as well). * * @return the default ClassLoader (never <code>null</code>) * @see java.lang.Thread#getContextClassLoader() */ public static ClassLoader getClassLoader() { return getClassLoader(ClassHelper.class); }
3.68
framework_TreeData_setParent
/** * Moves an item to become a child of the given parent item. The new parent * item must exist in the hierarchy. Setting the parent to {@code null} * makes the item a root item. After making changes to the tree data, * {@link TreeDataProvider#refreshAll()} should be called. * * @param item * the item to be set as the child of {@code parent} * @param parent * the item to be set as parent or {@code null} to set the item * as root * @since 8.1 */ public void setParent(T item, T parent) { if (!contains(item)) { throw new IllegalArgumentException( "Item '" + item + "' not in the hierarchy"); } if (parent != null && !contains(parent)) { throw new IllegalArgumentException( "Parent needs to be added before children. " + "To set as root item, call with parent as null"); } if (item.equals(parent)) { throw new IllegalArgumentException( "Item cannot be the parent of itself"); } T oldParent = itemToWrapperMap.get(item).getParent(); if (!Objects.equals(oldParent, parent)) { // Remove item from old parent's children itemToWrapperMap.get(oldParent).removeChild(item); // Add item to parent's children itemToWrapperMap.get(parent).addChild(item); // Set item's new parent itemToWrapperMap.get(item).setParent(parent); } }
3.68
morf_SqlServerDialect_getSqlForNow
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForNow(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForNow(Function function) { return "GETUTCDATE()"; }
3.68
hadoop_OperationDuration_humanTime
/** * Convert to a human time of minutes:seconds.millis. * @param time time to humanize. * @return a printable value. */ public static String humanTime(long time) { long seconds = (time / 1000); long minutes = (seconds / 60); return String.format("%d:%02d.%03ds", minutes, seconds % 60, time % 1000); }
3.68
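A worked example: with time = 125_345 ms, seconds = 125 and minutes = 2, so the format yields:
OperationDuration.humanTime(125_345); // -> "2:05.345s"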
flink_CompactingHashTable_close
/** * Closes the hash table. This effectively releases all internal structures and closes all open * files and removes them. The call to this method is valid both as a cleanup after the complete * inputs were properly processed, and as a cancellation call, which cleans up all resources * that are currently held by the hash join. If another process still accesses the hash table * after close has been called, no operations will be performed. */ @Override public void close() { // make sure that we close only once synchronized (this.stateLock) { if (this.closed) { return; } this.closed = true; } LOG.debug("Closing hash table and releasing resources."); // release the table structure releaseTable(); // clear the memory in the partitions clearPartitions(); }
3.68
hudi_CommitUtils_getValidCheckpointForCurrentWriter
/** * Process previous commits metadata in the timeline to determine the checkpoint given a checkpoint key. * NOTE: This is very similar in intent to DeltaSync#getLatestCommitMetadataWithValidCheckpointInfo except that * different deployment models (deltastreamer or spark structured streaming) could have different checkpoint keys. * * @param timeline completed commits in active timeline. * @param checkpointKey the checkpoint key in the extra metadata of the commit. * @param keyToLookup key of interest for which the checkpoint is looked up. * @return An optional commit metadata with latest checkpoint. */ public static Option<String> getValidCheckpointForCurrentWriter(HoodieTimeline timeline, String checkpointKey, String keyToLookup) { return (Option<String>) timeline.getWriteTimeline().filterCompletedInstants().getReverseOrderedInstants() .map(instant -> { try { HoodieCommitMetadata commitMetadata = HoodieCommitMetadata .fromBytes(timeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class); // process commits only with checkpoint entries String checkpointValue = commitMetadata.getMetadata(checkpointKey); if (StringUtils.nonEmpty(checkpointValue)) { // return if checkpoint for "keyToLookup" exists. return readCheckpointValue(checkpointValue, keyToLookup); } else { return Option.empty(); } } catch (IOException e) { throw new HoodieIOException("Failed to parse HoodieCommitMetadata for " + instant.toString(), e); } }).filter(Option::isPresent).findFirst().orElse(Option.empty()); }
3.68
hadoop_ManifestCommitterSupport_manifestTempPathForTaskAttempt
/** * Get the path in the manifest subdir for the temp path to save a * task attempt's manifest before renaming it to the * path defined by {@link #manifestPathForTask(Path, String)}. * @param manifestDir manifest directory * @param taskAttemptId task attempt ID. * @return the path to save/load the manifest. */ public static Path manifestTempPathForTaskAttempt(Path manifestDir, String taskAttemptId) { return new Path(manifestDir, taskAttemptId + MANIFEST_SUFFIX + TMP_SUFFIX); }
3.68
pulsar_AbstractDispatcherSingleActiveConsumer_disconnectAllConsumers
/** * Disconnect all consumers on this dispatcher (server side close). This triggers channelInactive on the inbound * handler which calls dispatcher.removeConsumer(), where the closeFuture is completed. * * @return a future that completes once all consumers have been disconnected */ public synchronized CompletableFuture<Void> disconnectAllConsumers(boolean isResetCursor) { closeFuture = new CompletableFuture<>(); if (!consumers.isEmpty()) { consumers.forEach(consumer -> consumer.disconnect(isResetCursor)); cancelPendingRead(); } else { // no consumer connected, complete disconnect immediately closeFuture.complete(null); } return closeFuture; }
3.68
framework_TabSheet_isUserOriginated
/** * {@inheritDoc} * * @since 8.1 */ @Override public boolean isUserOriginated() { return userOriginated; }
3.68
hadoop_S3ARemoteInputStream_getS3AStreamStatistics
/** * Access the input stream statistics. * This is for internal testing and may be removed without warning. * @return the statistics for this input stream */ @InterfaceAudience.Private @InterfaceStability.Unstable public S3AInputStreamStatistics getS3AStreamStatistics() { return streamStatistics; }
3.68
flink_MemorySegment_getFloatLittleEndian
/** * Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in * little endian byte order. This method's speed depends on the system's native byte order, and * it is possibly slower than {@link #getFloat(int)}. For most cases (such as transient storage * in memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link * #getFloat(int)} is the preferable choice. * * @param index The position from which the value will be read. * @return The float value at the given position. * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the * segment size minus 4. */ public float getFloatLittleEndian(int index) { return Float.intBitsToFloat(getIntLittleEndian(index)); }
3.68
flink_FileCompactStrategy_setSizeThreshold
/** * Optional, compaction will be triggered when the total size of compacting files reaches * the threshold. -1 by default, indicating the size is unlimited. */ public FileCompactStrategy.Builder setSizeThreshold(long sizeThreshold) { this.sizeThreshold = sizeThreshold; return this; }
3.68
pulsar_PulsarAdminImpl_namespaces
/** * @return the namespaces management object */ public Namespaces namespaces() { return namespaces; }
3.68
flink_CheckpointStatsCounts_getTotalNumberOfCheckpoints
/** * Returns the total number of checkpoints (in progress, completed, failed). * * @return Total number of checkpoints. */ public long getTotalNumberOfCheckpoints() { return numTotalCheckpoints; }
3.68
flink_HadoopInputs_readSequenceFile
/** * Creates a Flink {@link InputFormat} to read a Hadoop sequence file for the given key and * value classes. * * @return A Flink InputFormat that wraps a Hadoop SequenceFileInputFormat. */ public static <K, V> HadoopInputFormat<K, V> readSequenceFile( Class<K> key, Class<V> value, String inputPath) throws IOException { return readHadoopFile( new org.apache.hadoop.mapred.SequenceFileInputFormat<K, V>(), key, value, inputPath); }
3.68
flink_RocksDBMemoryControllerUtils_calculateRocksDBMutableLimit
/** * Calculate {@code mutable_limit_} as RocksDB calculates it in <a * href="https://github.com/dataArtisans/frocksdb/blob/FRocksDB-5.17.2/memtable/write_buffer_manager.cc#L54"> * here</a>. * * @param bufferSize write buffer size * @return mutableLimit */ static long calculateRocksDBMutableLimit(long bufferSize) { return bufferSize * 7 / 8; }
3.68
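A worked example: the limit is 7/8 of the buffer size, e.g. for a 512 MiB write buffer (the method is package-private, so callable from the same package):
long limit = calculateRocksDBMutableLimit(512L * 1024 * 1024); // 469_762_048 bytes (448 MiB)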
druid_DruidDataSource_isMysqlOrMariaDBUrl
/** * Issues 5192 and 5457. * @see <a href="https://dev.mysql.com/doc/connector-j/8.1/en/connector-j-reference-jdbc-url-format.html">MySQL Connection URL Syntax</a> * @see <a href="https://mariadb.com/kb/en/about-mariadb-connector-j/">About MariaDB Connector/J</a> * @param jdbcUrl the JDBC connection URL * @return true if the URL targets MySQL or MariaDB */ private static boolean isMysqlOrMariaDBUrl(String jdbcUrl) { return jdbcUrl.startsWith("jdbc:mysql://") || jdbcUrl.startsWith("jdbc:mysql:loadbalance://") || jdbcUrl.startsWith("jdbc:mysql:replication://") || jdbcUrl.startsWith("jdbc:mariadb://") || jdbcUrl.startsWith("jdbc:mariadb:loadbalance://") || jdbcUrl.startsWith("jdbc:mariadb:replication://"); }
3.68
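A few illustrative inputs (the private helper shown as if accessible):
isMysqlOrMariaDBUrl("jdbc:mysql://localhost:3306/db");      // true
isMysqlOrMariaDBUrl("jdbc:mariadb:replication://h1,h2/db"); // true
isMysqlOrMariaDBUrl("jdbc:postgresql://localhost/db");      // false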
pulsar_FunctionApiResource_clientAppId
/** * @deprecated use {@link #authParams()} instead. */ @Deprecated public String clientAppId() { return httpRequest != null ? (String) httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName) : null; }
3.68
flink_TypeStrategies_nullableIfAllArgs
/** * A type strategy that can be used to make a result type nullable if all of the input arguments * are nullable. Otherwise the type will be not null. */ public static TypeStrategy nullableIfAllArgs(TypeStrategy initialStrategy) { return nullableIfAllArgs(ConstantArgumentCount.any(), initialStrategy); }
3.68
framework_Escalator_getScrollLeft
/** * Returns the logical horizontal scroll offset. Note that this is not * necessarily the same as the {@code scrollLeft} attribute in the DOM. * * @return the logical horizontal scroll offset */ public double getScrollLeft() { return horizontalScrollbar.getScrollPos(); }
3.68
flink_HiveParserTypeCheckProcFactory_getBoolExprProcessor
/** Factory method to get BoolExprProcessor. */ public HiveParserTypeCheckProcFactory.BoolExprProcessor getBoolExprProcessor() { return new HiveParserTypeCheckProcFactory.BoolExprProcessor(); }
3.68
hadoop_SelectEventStreamPublisher_cancel
/** * Cancel the operation. */ public void cancel() { selectOperationFuture.cancel(true); }
3.68
hadoop_ConnectionContext_isActive
/** * Check if the connection is active. * * @return True if the connection is active. */ public synchronized boolean isActive() { return this.numThreads > 0; }
3.68
flink_JobMaster_acknowledgeCheckpoint
// TODO: This method needs a leader session ID @Override public void acknowledgeCheckpoint( final JobID jobID, final ExecutionAttemptID executionAttemptID, final long checkpointId, final CheckpointMetrics checkpointMetrics, @Nullable final SerializedValue<TaskStateSnapshot> checkpointState) { schedulerNG.acknowledgeCheckpoint( jobID, executionAttemptID, checkpointId, checkpointMetrics, deserializeTaskStateSnapshot(checkpointState, getClass().getClassLoader())); }
3.68
flink_QueryableStateClient_shutdownAndWait
/** * Shuts down the client and waits until shutdown is completed. * * <p>If an exception is thrown, a warning is logged containing the exception message. */ public void shutdownAndWait() { try { client.shutdown().get(); LOG.info("The Queryable State Client was shutdown successfully."); } catch (Exception e) { LOG.warn("The Queryable State Client shutdown failed: ", e); } }
3.68
framework_CvalChecker_computeLicenseName
/** * Given a product name returns the name of the file with the license key. * * Traditionally we have delivered license keys with a name like * 'vaadin.touchkit.developer.license' but our database product name is * 'vaadin-touchkit' so we have to replace '-' by '.' to maintain * compatibility. */ static final String computeLicenseName(String productName) { return productName.replace("-", ".") + ".developer.license"; }
3.68
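A worked example: dashes become dots before the suffix is appended:
computeLicenseName("vaadin-touchkit"); // -> "vaadin.touchkit.developer.license"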
flink_AbstractMetricGroup_addMetric
/** * Adds the given metric to the group and registers it at the registry, if the group is not yet * closed, and if no metric with the same name has been registered before. * * @param name the name to register the metric under * @param metric the metric to register */ protected void addMetric(String name, Metric metric) { if (metric == null) { LOG.warn( "Ignoring attempted registration of a metric due to being null for name {}.", name); return; } // add the metric only if the group is still open synchronized (this) { if (!closed) { // immediately put without a 'contains' check to optimize the common case (no collision); collisions are resolved later Metric prior = metrics.put(name, metric); // check for collisions with other metric names if (prior == null) { // no other metric with this name yet if (groups.containsKey(name)) { // we warn here, rather than failing, because metrics are tools that should not fail the program when used incorrectly LOG.warn( "Name collision: Adding a metric with the same name as a metric subgroup: '" + name + "'. Metric might not get properly reported. " + Arrays.toString(scopeComponents)); } registry.register(metric, name, this); } else { // we had a collision. put back the original value metrics.put(name, prior); // we warn here, rather than failing, because metrics are tools that should not fail the program when used incorrectly LOG.warn( "Name collision: Group already contains a Metric with the name '" + name + "'. Metric will not be reported." + Arrays.toString(scopeComponents)); } } } }
3.68
hmily_HmilyTransactionHolder_getCurrentTransaction
/** * acquired by threadLocal. * * @return {@linkplain HmilyTransaction} */ public HmilyTransaction getCurrentTransaction() { return CURRENT.get(); }
3.68
flink_UpsertTestFileUtil_writeRecords
/** * Writes a Map of records serialized by the {@link UpsertTestSinkWriter} to the given * BufferedOutputStream. * * @param bos the BufferedOutputStream to write to * @param records the Map of records created by the UpsertTestSinkWriter * @throws IOException */ public static void writeRecords( BufferedOutputStream bos, Map<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> records) throws IOException { checkNotNull(bos); for (Map.Entry<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> record : records.entrySet()) { byte[] key = record.getKey().array(); byte[] value = record.getValue().array(); bos.write(MAGIC_BYTE); bos.write(key.length); bos.write(key); bos.write(value.length); bos.write(value); } bos.flush(); }
3.68
hbase_RowCountEndpoint_start
/** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. * @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } }
3.68
hbase_HRegion_startBulkRegionOperation
/** * This method needs to be called before any public call that reads or modifies stores in bulk. It * has to be called just before a try. #closeBulkRegionOperation needs to be called in the try's * finally block. Acquires a write lock and checks if the region is closing or closed. * @throws NotServingRegionException when the region is closing or closed * @throws RegionTooBusyException if failed to get the lock in time * @throws InterruptedIOException if interrupted while waiting for a lock */ private void startBulkRegionOperation(boolean writeLockNeeded) throws IOException { if (this.closing.get()) { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } if (writeLockNeeded) lock(lock.writeLock()); else lock(lock.readLock()); if (this.closed.get()) { if (writeLockNeeded) lock.writeLock().unlock(); else lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); } regionLockHolders.put(Thread.currentThread(), true); }
3.68
flink_FutureUtils_runAfterwardsAsync
/** * Run the given action after the completion of the given future. The given future can be * completed normally or exceptionally. In case of an exceptional completion the action's * exception will be added to the initial exception. * * @param future to wait for its completion * @param runnable action which is triggered after the future's completion * @param executor to run the given action * @return Future which is completed after the action has completed. This future can contain an * exception, if an error occurred in the given future or action. */ public static CompletableFuture<Void> runAfterwardsAsync( CompletableFuture<?> future, RunnableWithException runnable, Executor executor) { final CompletableFuture<Void> resultFuture = new CompletableFuture<>(); future.whenCompleteAsync( (Object ignored, Throwable throwable) -> { try { runnable.run(); } catch (Throwable e) { throwable = ExceptionUtils.firstOrSuppressed(e, throwable); } if (throwable != null) { resultFuture.completeExceptionally(throwable); } else { resultFuture.complete(null); } }, executor); return resultFuture; }
3.68
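A sketch with hypothetical names (jobFuture, releaseResources, executor); RunnableWithException accepts a lambda, and a cleanup failure is suppressed into the original one:
CompletableFuture<Void> done = FutureUtils.runAfterwardsAsync(
    jobFuture,                // CompletableFuture<?> to wait for
    () -> releaseResources(), // cleanup action, may throw
    executor);                // completes after cleanup ran, exceptionally if either step failed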
zxing_URIParsedResult_massageURI
/** * Transforms a string that represents a URI into something more proper, by adding or canonicalizing * the protocol. */ private static String massageURI(String uri) { uri = uri.trim(); int protocolEnd = uri.indexOf(':'); if (protocolEnd < 0 || isColonFollowedByPortNumber(uri, protocolEnd)) { // No protocol, or found a colon, but it looks like it is after the host, so the protocol is still missing, // so assume http uri = "http://" + uri; } return uri; }
3.68
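Worked examples, inferred from the code above (the private helper shown as if accessible; isColonFollowedByPortNumber distinguishes host:port colons from protocol colons):
massageURI("google.com");      // -> "http://google.com" (no colon at all)
massageURI("google.com:8080"); // -> "http://google.com:8080" (colon starts a port, not a protocol)
massageURI("mailto:x@y.com");  // unchanged: the colon introduces a protocol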
flink_RestServerEndpointConfiguration_getRestAddress
/** @see RestOptions#ADDRESS */ public String getRestAddress() { return restAddress; }
3.68
hbase_AsyncTableRegionLocator_getStartEndKeys
/** * Gets the starting and ending row keys for every region in the currently open table. * <p> * This is mainly useful for the MapReduce integration. * @return Pair of arrays of region starting and ending row keys */ default CompletableFuture<List<Pair<byte[], byte[]>>> getStartEndKeys() { return getAllRegionLocations().thenApply( locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())) .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) .collect(Collectors.toList())); }
3.68
hadoop_ResourceSkyline_setSkylineList
/** * Set skylineList. * * @param skylineListConfig skylineList. */ public final void setSkylineList( final RLESparseResourceAllocation skylineListConfig) { this.skylineList = skylineListConfig; }
3.68
hadoop_RawErasureDecoder_allowChangeInputs
/** * Whether changing the input buffers is allowed while performing encoding/decoding. * @return true if it's allowed to change inputs, false otherwise */ public boolean allowChangeInputs() { return coderOptions.allowChangeInputs(); }
3.68
hbase_TableRecordReader_initialize
/** * Initializes the reader. * @param inputsplit The split to work with. * @param context The current task context. * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#initialize( * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, InterruptedException { this.recordReaderImpl.initialize(inputsplit, context); }
3.68
framework_CacheFlushNotifier_addInstance
/** * Adds the given SQLContainer to the cache flush notification receiver list * * @param c * Container to add */ public static void addInstance(SQLContainer c) { removeDeadReferences(); if (c != null) { allInstances.add(new WeakReference<SQLContainer>(c, deadInstances)); } }
3.68
hadoop_JobContextImpl_getJobConf
/** * Get the job Configuration * * @return JobConf */ public JobConf getJobConf() { return job; }
3.68
hadoop_SliderFileSystem_getComponentDir
/** * Returns the component directory path. * * @param serviceVersion service version * @param compName component name * @return component directory */ public Path getComponentDir(String serviceVersion, String compName) { return new Path(new Path(getAppDir(), "components"), serviceVersion + "/" + compName); }
3.68
flink_HsMemoryDataSpiller_writeBuffers
/** Write all buffers to disk. */ private void writeBuffers(List<BufferWithIdentity> bufferWithIdentities, long expectedBytes) throws IOException { if (bufferWithIdentities.isEmpty()) { return; } ByteBuffer[] bufferWithHeaders = new ByteBuffer[2 * bufferWithIdentities.size()]; for (int i = 0; i < bufferWithIdentities.size(); i++) { Buffer buffer = bufferWithIdentities.get(i).getBuffer(); setBufferWithHeader(buffer, bufferWithHeaders, 2 * i); } BufferReaderWriterUtil.writeBuffers(dataFileChannel, expectedBytes, bufferWithHeaders); totalBytesWritten += expectedBytes; }
3.68
framework_VOptionGroupBase_getRows
/** * For internal use only. May be removed or replaced in the future. * * @return "rows" specified in uidl, 0 if not specified */ public int getRows() { return rows; }
3.68
framework_RequiredIndicatorForFieldsWithoutCaption_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Test for ensuring that the required indicator is visible for fields even when they would not otherwise have a caption"; }
3.68
framework_VAbstractCalendarPanel_buildCalendarBody
/** * Builds the day and time selectors of the calendar. */ @SuppressWarnings("deprecation") private void buildCalendarBody() { final int weekColumn = 0; final int firstWeekdayColumn = 1; final int headerRow = 0; setWidget(1, 0, days); setCellPadding(0); setCellSpacing(0); getFlexCellFormatter().setColSpan(1, 0, 5); getFlexCellFormatter().setStyleName(1, 0, getDateField().getStylePrimaryName() + "-calendarpanel-body"); days.getFlexCellFormatter().setStyleName(headerRow, weekColumn, "v-week"); days.setHTML(headerRow, weekColumn, "<strong></strong>"); // Hide the week column if week numbers are not to be displayed. days.getFlexCellFormatter().setVisible(headerRow, weekColumn, isShowISOWeekNumbers()); days.getRowFormatter().setStyleName(headerRow, getDateField().getStylePrimaryName() + "-calendarpanel-weekdays"); if (isShowISOWeekNumbers()) { days.getFlexCellFormatter().setStyleName(headerRow, weekColumn, "v-first"); days.getFlexCellFormatter().setStyleName(headerRow, firstWeekdayColumn, ""); days.getRowFormatter().addStyleName(headerRow, getDateField().getStylePrimaryName() + "-calendarpanel-weeknumbers"); } else { days.getFlexCellFormatter().setStyleName(headerRow, weekColumn, ""); days.getFlexCellFormatter().setStyleName(headerRow, firstWeekdayColumn, "v-first"); } days.getFlexCellFormatter().setStyleName(headerRow, firstWeekdayColumn + 6, "v-last"); // Print weekday names final int firstDay = getDateTimeService().getFirstDayOfWeek(); for (int i = 0; i < 7; i++) { int day = (i + firstDay) % 7; if (isBelowMonth(getResolution())) { days.setHTML(headerRow, firstWeekdayColumn + i, "<strong>" + getDateTimeService().getShortDay(day) + "</strong>"); } else { days.setHTML(headerRow, firstWeekdayColumn + i, ""); } Roles.getColumnheaderRole().set(days.getCellFormatter() .getElement(headerRow, firstWeekdayColumn + i)); } // Zero out hours, minutes, seconds, and milliseconds to compare dates without time part final Date tmp = new Date(); final Date today = new Date(tmp.getYear(), tmp.getMonth(), tmp.getDate()); final Date selectedDate = value == null ? null : new Date(value.getYear(), value.getMonth(), value.getDate()); final int startWeekDay = getDateTimeService() .getStartWeekDay(displayedMonth); final Date curr = (Date) displayedMonth.clone(); // Start from the first day of the week that at least partially belongs to the current month curr.setDate(1 - startWeekDay); // No month has more than 6 weeks so 6 is a safe maximum for rows. for (int weekOfMonth = 1; weekOfMonth < 7; weekOfMonth++) { for (int dayOfWeek = 0; dayOfWeek < 7; dayOfWeek++) { // Actually write the day of month Date dayDate = (Date) curr.clone(); Day day = new Day(dayDate); // Set ID with prefix of the calendar panel's ID day.getElement().setId(getElement().getId() + "-" + weekOfMonth + "-" + dayOfWeek); // Set assistive label to read focused date and month/year Roles.getButtonRole().set(day.getElement()); Roles.getButtonRole().setAriaLabelledbyProperty( day.getElement(), Id.of(day.getElement()), Id.of(getFlexCellFormatter().getElement(0, 2))); day.setStyleName(getDateField().getStylePrimaryName() + "-calendarpanel-day"); if (!isDateInsideRange(dayDate, getResolution(this::isDay))) { day.addStyleDependentName(CN_OUTSIDE_RANGE); } if (curr.equals(selectedDate)) { day.addStyleDependentName(CN_SELECTED); Roles.getGridcellRole().setAriaSelectedState( day.getElement(), SelectedValue.TRUE); selectedDay = day; } if (curr.equals(today)) { day.addStyleDependentName(CN_TODAY); } if (curr.equals(focusedDate)) { focusedDay = day; if (hasFocus) { day.addStyleDependentName(CN_FOCUSED); // Reference focused day from calendar panel Roles.getGridRole().setAriaActivedescendantProperty( getElement(), Id.of(day.getElement())); } } if (curr.getMonth() != displayedMonth.getMonth()) { day.addStyleDependentName(CN_OFFMONTH); } String dayDateString = df.format(dayDate); if (dateStyles.containsKey(dayDateString)) { day.addStyleName(dateStyles.get(dayDateString)); } days.setWidget(weekOfMonth, firstWeekdayColumn + dayOfWeek, day); Roles.getGridcellRole().set(days.getCellFormatter().getElement( weekOfMonth, firstWeekdayColumn + dayOfWeek)); // ISO week numbers if requested days.getCellFormatter().setVisible(weekOfMonth, weekColumn, isShowISOWeekNumbers()); if (isShowISOWeekNumbers()) { final String baseCssClass = getDateField() .getStylePrimaryName() + "-calendarpanel-weeknumber"; String weekCssClass = baseCssClass; int weekNumber = DateTimeService.getISOWeekNumber(curr); days.setHTML(weekOfMonth, 0, "<span class=\"" + weekCssClass + "\"" + ">" + weekNumber + "</span>"); } curr.setDate(curr.getDate() + 1); } } }
3.68
flink_Pattern_subtype
/** * Applies a subtype constraint on the current pattern. This means that an event has to be of * the given subtype in order to be matched. * * @param subtypeClass Class of the subtype * @param <S> Type of the subtype * @return The same pattern with the new subtype constraint */ public <S extends F> Pattern<T, S> subtype(final Class<S> subtypeClass) { Preconditions.checkNotNull(subtypeClass, "The class cannot be null."); if (condition == null) { this.condition = new SubtypeCondition<F>(subtypeClass); } else { this.condition = new RichAndCondition<>(condition, new SubtypeCondition<F>(subtypeClass)); } @SuppressWarnings("unchecked") Pattern<T, S> result = (Pattern<T, S>) this; return result; }
3.68
framework_VTooltip_showAssistive
/** * Show the tooltip with the provided info for assistive devices. * * @param info * with the content of the tooltip */ public void showAssistive(TooltipInfo info) { updatePosition(null, true); setTooltipText(info); showTooltip(); }
3.68
AreaShop_FileManager_getRentNames
/** * Get a list of names of all rent regions. * @return A String list with all the names */ public List<String> getRentNames() { ArrayList<String> result = new ArrayList<>(); for(RentRegion region : getRents()) { result.add(region.getName()); } return result; }
3.68
framework_Slot_setExpandRatio
/** * Set how the slot should be expanded relative to the other slots. 0 means * that the slot should not participate in the division of space based on * the expand ratios but instead be allocated space based on its natural * size. Other values cause the slot to get a share of the otherwise * unallocated space in proportion to the slot's expand ratio value. * * @param expandRatio * The ratio of the space the slot should occupy * */ public void setExpandRatio(double expandRatio) { this.expandRatio = expandRatio; }
3.68
framework_Range_between
/** * Creates a range between two integers. * <p> * The range start is <em>inclusive</em> and the end is <em>exclusive</em>. * So, a range "between" 0 and 5 represents the numbers 0, 1, 2, 3 and 4, * but not 5. * * @param start * the start of the the range, inclusive * @param end * the end of the range, exclusive * @return a range representing <code>[start..end[</code> * @throws IllegalArgumentException * if <code>start &gt; end</code> */ public static Range between(final int start, final int end) throws IllegalArgumentException { return new Range(start, end); }
3.68
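A worked example; start is inclusive, end exclusive:
Range r = Range.between(0, 5); // represents 0, 1, 2, 3 and 4, but not 5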
hbase_ServerManager_getOnlineServers
/** Returns Read-only map of servers to serverinfo */ public Map<ServerName, ServerMetrics> getOnlineServers() { // Presumption is that iterating the returned Map is OK. synchronized (this.onlineServers) { return Collections.unmodifiableMap(this.onlineServers); } }
3.68
dubbo_NacosConnectionManager_createNamingService
/** * Create an instance of {@link NamingService} from the specified {@link URL connection url}. * * @return {@link NamingService} */ protected NamingService createNamingService() { Properties nacosProperties = buildNacosProperties(this.connectionURL); NamingService namingService = null; try { for (int i = 0; i < retryTimes + 1; i++) { namingService = NacosFactory.createNamingService(nacosProperties); String serverStatus = namingService.getServerStatus(); boolean namingServiceAvailable = testNamingService(namingService); if (!check || (UP.equals(serverStatus) && namingServiceAvailable)) { break; } else { logger.warn( LoggerCodeConstants.REGISTRY_NACOS_EXCEPTION, "", "", "Failed to connect to nacos naming server. " + "Server status: " + serverStatus + ". " + "Naming Service Available: " + namingServiceAvailable + ". " + (i < retryTimes ? "Dubbo will retry in " + sleepMsBetweenRetries + " ms. " : "Exceeded max retry times.") + "Try times: " + (i + 1)); } namingService.shutDown(); namingService = null; Thread.sleep(sleepMsBetweenRetries); } } catch (NacosException e) { if (logger.isErrorEnabled()) { logger.error(REGISTRY_NACOS_EXCEPTION, "", "", e.getErrMsg(), e); } } catch (InterruptedException e) { logger.error(INTERNAL_INTERRUPTED, "", "", "Interrupted when creating nacos naming service client.", e); Thread.currentThread().interrupt(); throw new IllegalStateException(e); } if (namingService == null) { logger.error( REGISTRY_NACOS_EXCEPTION, "", "", "Failed to create nacos naming service client. Reason: server status check failed."); throw new IllegalStateException( "Failed to create nacos naming service client. Reason: server status check failed."); } return namingService; }
3.68
hbase_BalancerClusterState_computeCachedLocalities
/** * Computes and caches the locality for each region/rack combinations, as well as storing a * mapping of region -> server and region -> rack such that server and rack have the highest * locality for region */ private void computeCachedLocalities() { rackLocalities = new float[numRegions][numRacks]; regionsToMostLocalEntities = new int[LocalityType.values().length][numRegions]; // Compute localities and find most local server per region for (int region = 0; region < numRegions; region++) { int serverWithBestLocality = 0; float bestLocalityForRegion = 0; for (int server = 0; server < numServers; server++) { // Aggregate per-rack locality float locality = getLocalityOfRegion(region, server); int rack = serverIndexToRackIndex[server]; int numServersInRack = serversPerRack[rack].length; rackLocalities[region][rack] += locality / numServersInRack; if (locality > bestLocalityForRegion) { serverWithBestLocality = server; bestLocalityForRegion = locality; } } regionsToMostLocalEntities[LocalityType.SERVER.ordinal()][region] = serverWithBestLocality; // Find most local rack per region int rackWithBestLocality = 0; float bestRackLocalityForRegion = 0.0f; for (int rack = 0; rack < numRacks; rack++) { float rackLocality = rackLocalities[region][rack]; if (rackLocality > bestRackLocalityForRegion) { bestRackLocalityForRegion = rackLocality; rackWithBestLocality = rack; } } regionsToMostLocalEntities[LocalityType.RACK.ordinal()][region] = rackWithBestLocality; } }
3.68
dubbo_URLStrParser_parseURLBody
/** * @param fullURLStr : fullURLString * @param decodedBody : format: [protocol://][username:password@][host:port]/[path] * @param parameters : * @return URL */ private static URL parseURLBody(String fullURLStr, String decodedBody, Map<String, String> parameters) { int starIdx = 0, endIdx = decodedBody.length(); // ignore the url content following '#' int poundIndex = decodedBody.indexOf('#'); if (poundIndex != -1) { endIdx = poundIndex; } String protocol = null; int protoEndIdx = decodedBody.indexOf("://"); if (protoEndIdx >= 0) { if (protoEndIdx == 0) { throw new IllegalStateException("url missing protocol: \"" + fullURLStr + "\""); } protocol = decodedBody.substring(0, protoEndIdx); starIdx = protoEndIdx + 3; } else { // case: file:/path/to/file.txt protoEndIdx = decodedBody.indexOf(":/"); if (protoEndIdx >= 0) { if (protoEndIdx == 0) { throw new IllegalStateException("url missing protocol: \"" + fullURLStr + "\""); } protocol = decodedBody.substring(0, protoEndIdx); starIdx = protoEndIdx + 1; } } String path = null; int pathStartIdx = indexOf(decodedBody, '/', starIdx, endIdx); if (pathStartIdx >= 0) { path = decodedBody.substring(pathStartIdx + 1, endIdx); endIdx = pathStartIdx; } String username = null; String password = null; int pwdEndIdx = lastIndexOf(decodedBody, '@', starIdx, endIdx); if (pwdEndIdx > 0) { int passwordStartIdx = indexOf(decodedBody, ':', starIdx, pwdEndIdx); if (passwordStartIdx != -1) { // tolerate incomplete user pwd input, like '1234@' username = decodedBody.substring(starIdx, passwordStartIdx); password = decodedBody.substring(passwordStartIdx + 1, pwdEndIdx); } else { username = decodedBody.substring(starIdx, pwdEndIdx); } starIdx = pwdEndIdx + 1; } String host = null; int port = 0; int hostEndIdx = lastIndexOf(decodedBody, ':', starIdx, endIdx); if (hostEndIdx > 0 && hostEndIdx < decodedBody.length() - 1) { if (lastIndexOf(decodedBody, '%', starIdx, endIdx) > hostEndIdx) { // ipv6 address with scope id // e.g. fe80:0:0:0:894:aeec:f37d:23e1%en0 // see https://howdoesinternetwork.com/2013/ipv6-zone-id // ignore } else { port = Integer.parseInt(decodedBody.substring(hostEndIdx + 1, endIdx)); endIdx = hostEndIdx; } } if (endIdx > starIdx) { host = decodedBody.substring(starIdx, endIdx); } // check cache protocol = URLItemCache.intern(protocol); path = URLItemCache.checkPath(path); return new ServiceConfigURL(protocol, username, password, host, port, path, parameters); }
3.68
framework_Escalator_calculateRowWidth
/** * Calculate the width of a row, as the sum of columns' widths. * * @return the width of a row, in pixels */ public double calculateRowWidth() { return getCalculatedColumnsWidth( Range.between(0, getColumnCount())); }
3.68
rocketmq-connect_PluginUtils_prunedName
/** * Remove the plugin type name at the end of a plugin class name, if such suffix is present. * This method is meant to be used to extract plugin aliases. */ public static String prunedName(PluginWrapper<?> plugin) { switch (plugin.type()) { case SOURCE: case SINK: case CONNECTOR: return prunePluginName(plugin, "Connector"); default: return prunePluginName(plugin, plugin.type().simpleName()); } }
3.68