name: string (lengths 12-178)
code_snippet: string (lengths 8-36.5k)
score: float64 (range 3.26-3.68)
morf_SchemaModificationAdapter_dropExistingIndexesIfNecessary
/** * Drop all existing indexes of the table that's about to be deployed. */ private void dropExistingIndexesIfNecessary(Table tableToDeploy) { tableToDeploy.indexes().forEach(index -> { Table existingTableWithSameIndex = existingIndexNamesAndTables.remove(index.getName().toUpperCase()); if (existingTableWithSameIndex != null && !tableToDeploy.getName().equalsIgnoreCase(existingTableWithSameIndex.getName())) { // Only drop the index if it belongs to the previous schema under a different tablename. databaseDataSetConsumer.getSqlExecutor().execute(sqlDialect.indexDropStatements(existingTableWithSameIndex, index), connection); } }); }
3.68
hadoop_FederationStateStoreUtils_logAndThrowRetriableException
/** * Throws a <code>FederationStateStoreRetriableException</code> due to an * error in <code>FederationStateStore</code>. * * @param log the logger interface. * @param errMsgFormat the error message format string. * @param args referenced by the format specifiers in the format string. * @throws YarnException on failure */ public static void logAndThrowRetriableException( Logger log, String errMsgFormat, Object... args) throws YarnException { String errMsg = String.format(errMsgFormat, args); log.error(errMsg); throw new FederationStateStoreRetriableException(errMsg); }
3.68
framework_ConnectorFocusAndBlurHandler_removeHandlers
/** * Remove all handlers from the widget and the connector. */ public void removeHandlers() { if (focusRegistration != null) { focusRegistration.removeHandler(); } if (blurRegistration != null) { blurRegistration.removeHandler(); } if (stateChangeRegistration != null) { stateChangeRegistration.removeHandler(); } }
3.68
hbase_SingleColumnValueFilter_setFilterIfMissing
/** * Set whether the entire row should be filtered if the column is not found. * <p> * If true, the entire row will be skipped if the column is not found. * <p> * If false, the row will pass if the column is not found. This is the default. * @param filterIfMissing flag */ public void setFilterIfMissing(boolean filterIfMissing) { this.filterIfMissing = filterIfMissing; }
3.68
hadoop_CloseableReferenceCount_unreference
/** * Decrement the reference count. * * @return True if the object is closed and has no outstanding * references. */ public boolean unreference() { int newVal = status.decrementAndGet(); Preconditions.checkState(newVal != 0xffffffff, "called unreference when the reference count was already at 0."); return newVal == STATUS_CLOSED_MASK; }
3.68
hadoop_RegisterApplicationMasterRequest_setPlacementConstraints
/** * Set Placement Constraints applicable to the * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s * of this application. * The mapping is from a set of allocation tags to a * <code>PlacementConstraint</code> associated with the tags. * For example: * Map &lt; * &lt;hb_regionserver&gt; -&gt; node_anti_affinity, * &lt;hb_regionserver, hb_master&gt; -&gt; rack_affinity, * ... * &gt; * @param placementConstraints Placement Constraint Mapping. */ @Public @Unstable public void setPlacementConstraints( Map<Set<String>, PlacementConstraint> placementConstraints) { }
3.68
framework_ServerRpcQueue_getAll
/** * Returns a collection of all queued method invocations. * <p> * The returned collection must not be modified in any way * * @return a collection of all queued method invocations */ public Collection<MethodInvocation> getAll() { return pendingInvocations.values(); }
3.68
hadoop_NativeAzureFileSystemHelper_logAllLiveStackTraces
/* * Helper method that logs stack traces from all live threads. */ public static void logAllLiveStackTraces() { for (Map.Entry<Thread, StackTraceElement[]> entry : Thread.getAllStackTraces().entrySet()) { LOG.debug("Thread " + entry.getKey().getName()); StackTraceElement[] trace = entry.getValue(); for (int j = 0; j < trace.length; j++) { LOG.debug("\tat " + trace[j]); } } }
3.68
framework_VaadinFinderLocatorStrategy_validatePath
/* * (non-Javadoc) * * @see * com.vaadin.client.componentlocator.LocatorStrategy#validatePath(java. * lang.String) */ @Override public boolean validatePath(String path) { // This syntax is so difficult to regexp properly, that we'll just try // to find something with it regardless of the correctness of the // syntax... return true; }
3.68
hadoop_LoggingAuditor_addAttribute
/** * Add an attribute. * @param key key * @param value value */ public final void addAttribute(String key, String value) { attributes.put(key, value); }
3.68
hbase_MetricsStochasticBalancerSourceImpl_calcMruCap
/** * Calculates the MRU cache capacity from the metrics size. */ private static int calcMruCap(int metricsSize) { return (int) Math.ceil(metricsSize / MRU_LOAD_FACTOR) + 1; }
3.68
flink_Ordering_appendOrdering
/** * Extends this ordering by appending an additional order requirement. If the index has been * previously appended then the unmodified Ordering is returned. * * @param index Field index of the appended order requirement. * @param type Type of the appended order requirement. * @param order Order of the appended order requirement. * @return This ordering with an additional appended order requirement. */ public Ordering appendOrdering( Integer index, Class<? extends Comparable<?>> type, Order order) { if (index < 0) { throw new IllegalArgumentException("The key index must not be negative."); } if (order == null) { throw new NullPointerException(); } if (order == Order.NONE) { throw new IllegalArgumentException( "An ordering must not be created with a NONE order."); } if (!this.indexes.contains(index)) { this.indexes = this.indexes.addField(index); this.types.add(type); this.orders.add(order); } return this; }
3.68
framework_Slider_setValue
/** * Sets the value of this object. If the new value is not equal to * {@code getValue()}, fires a {@link ValueChangeEvent}. Throws * {@code NullPointerException} if the value is null. * * @param value * the new value, not {@code null} * @throws NullPointerException * if {@code value} is {@code null} */ @Override public void setValue(Double value) { Objects.requireNonNull(value, "Value cannot be null"); super.setValue(value); }
3.68
framework_ListSet_contains
// Delegate contains operations to the set @Override public boolean contains(Object o) { return itemSet.contains(o); }
3.68
flink_BinaryHashBucketArea_appendRecordAndInsert
/** Append record and insert to bucket. */ boolean appendRecordAndInsert(BinaryRowData record, int hashCode) throws IOException { final int posHashCode = findBucket(hashCode); // get the bucket for the given hash code final int bucketArrayPos = posHashCode >> table.bucketsPerSegmentBits; final int bucketInSegmentPos = (posHashCode & table.bucketsPerSegmentMask) << BUCKET_SIZE_BITS; final MemorySegment bucket = this.buckets[bucketArrayPos]; if (!table.tryDistinctBuildRow || !partition.isInMemory() || !findFirstSameBuildRow(bucket, hashCode, bucketInSegmentPos, record)) { int pointer = partition.insertIntoBuildBuffer(record); if (pointer != -1) { // record was inserted into an in-memory partition. a pointer must be inserted into // the buckets insertToBucket(bucket, bucketInSegmentPos, hashCode, pointer, true); return true; } else { return false; } } else { // distinct build rows in memory. return true; } }
3.68
morf_AbstractSqlDialectTest_expectedSelectOrderByNullsFirstDesc
/** * @return Expected SQL for {@link #testSelectOrderByNullsFirstDescendingScript()} */ protected String expectedSelectOrderByNullsFirstDesc() { return "SELECT stringField FROM " + tableName(ALTERNATE_TABLE) + " ORDER BY stringField DESC NULLS FIRST"; }
3.68
morf_Criterion_not
/** * Helper method to create a new "NOT" expression. * * <blockquote><pre> * Criterion.not(Criterion.eq(new Field("agreementnumber"), "A0001"));</pre></blockquote> * * @param criterion the first in the list of criteria * @return a new Criterion object */ public static Criterion not(Criterion criterion) { return new Criterion(Operator.NOT, criterion); }
3.68
MagicPlugin_Mage_getConversations
/** * This is a non-API method that returns the live version of the conversation map. */ @Nonnull public Map<Player, MageConversation> getConversations() { return conversations; }
3.68
framework_SelectorPath_generateJavaVariable
/** * Generate Java variable assignment from given selector fragment * * @param pathFragment * Selector fragment * @return piece of java code */ private String generateJavaVariable(String pathFragment) { // Get element type and predicates from fragment List<SelectorPredicate> predicates = SelectorPredicate .extractPredicates(pathFragment); String elementType = pathFragment.split("\\[")[0]; String name = getNameFromPredicates(predicates, elementType); if (name.equals(elementType)) { name = getNameWithCount(name); } // Replace unusable characters name = name.replaceAll("\\W", ""); // Lowercase the first character of name return elementType + "Element " + name.substring(0, 1).toLowerCase(Locale.ROOT) + name.substring(1) + " = "; }
3.68
flink_ExecutionConfig_isForceAvroEnabled
/** Returns whether Apache Avro is the default serializer for POJOs. */ public boolean isForceAvroEnabled() { return configuration.get(PipelineOptions.FORCE_AVRO); }
3.68
streampipes_Aggregation_process
// Gets called every time a new event is fired, i.e. when an aggregation has to be calculated protected void process(Iterable<Event> input, Collector<Event> out) { List<Double> values = new ArrayList<>(); Event lastEvent = new Event(); // Adds the values of all recent events in input to aggregate them later // Thereby drops all previous events and only emits the most recent event in the window with the // aggregated value added for (Event anInput : input) { for (String aggregate : fieldsToAggregate) { values.add(anInput.getFieldBySelector(aggregate).getAsPrimitive().getAsDouble()); lastEvent = anInput; String propertyPrefix = StringUtils.substringAfterLast(aggregate, ":"); String runtimeName = propertyPrefix + "_" + aggregationType.toString().toLowerCase(); lastEvent.addField(runtimeName, getAggregate(values)); } } out.collect(lastEvent); }
3.68
flink_Time_days
/** Creates a new {@link Time} that represents the given number of days. */ public static Time days(long days) { return of(days, TimeUnit.DAYS); }
3.68
graphhopper_AngleCalc_convertAzimuth2xaxisAngle
/** * convert north based clockwise azimuth (0, 360) into x-axis/east based angle (-Pi, Pi) */ public double convertAzimuth2xaxisAngle(double azimuth) { if (Double.compare(azimuth, 360) > 0 || Double.compare(azimuth, 0) < 0) { throw new IllegalArgumentException("Azimuth " + azimuth + " must be in (0, 360)"); } double angleXY = PI_2 - azimuth / 180. * Math.PI; if (angleXY < -Math.PI) angleXY += 2 * Math.PI; if (angleXY > Math.PI) angleXY -= 2 * Math.PI; return angleXY; }
3.68
flink_DateTimeUtils_timestampMillisToTime
/** * Get time from a timestamp. * * @param ts the timestamp in milliseconds. * @return the time in milliseconds. */ public static int timestampMillisToTime(long ts) { return (int) (ts % MILLIS_PER_DAY); }
3.68
hbase_Encryption_decrypt
/** * Decrypt a stream of ciphertext given a context and IV */ public static void decrypt(OutputStream out, InputStream in, int outLen, Context context, byte[] iv) throws IOException { Decryptor d = context.getCipher().getDecryptor(); d.setKey(context.getKey()); d.setIv(iv); // can be null decrypt(out, in, outLen, d); }
3.68
flink_MemorySize_parseBytes
/** * Parses the given string as bytes. The supported expressions are listed under {@link * MemorySize}. * * @param text The string to parse * @return The parsed size, in bytes. * @throws IllegalArgumentException Thrown, if the expression cannot be parsed. */ public static long parseBytes(String text) throws IllegalArgumentException { checkNotNull(text, "text"); final String trimmed = text.trim(); checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string"); final int len = trimmed.length(); int pos = 0; char current; while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') { pos++; } final String number = trimmed.substring(0, pos); final String unit = trimmed.substring(pos).trim().toLowerCase(Locale.US); if (number.isEmpty()) { throw new NumberFormatException("text does not start with a number"); } final long value; try { value = Long.parseLong(number); // this throws a NumberFormatException on overflow } catch (NumberFormatException e) { throw new IllegalArgumentException( "The value '" + number + "' cannot be represented as a 64 bit number (numeric overflow)."); } final long multiplier = parseUnit(unit).map(MemoryUnit::getMultiplier).orElse(1L); final long result = value * multiplier; // check for overflow if (result / multiplier != value) { throw new IllegalArgumentException( "The value '" + text + "' cannot be represented as a 64 bit number of bytes (numeric overflow)."); } return result; }
3.68
framework_StringToShortConverter_getModelType
/* * (non-Javadoc) * * @see com.vaadin.data.util.converter.Converter#getModelType() */ @Override public Class<Short> getModelType() { return Short.class; }
3.68
hadoop_PublishedConfiguration_asJson
/** * Return the values as a JSON string. * @return the JSON representation * @throws IOException on marshalling failure */ public String asJson() throws IOException { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); String json = mapper.writeValueAsString(entries); return json; }
3.68
graphhopper_Entity_writeTimeField
/** * Take a time expressed in seconds since noon - 12h (midnight, usually) and write it in HH:MM:SS format. */ protected void writeTimeField (int secsSinceMidnight) throws IOException { if (secsSinceMidnight == INT_MISSING) { writeStringField(""); return; } writeStringField(convertToGtfsTime(secsSinceMidnight)); }
3.68
hadoop_ManifestCommitter_getTaskAttemptDir
/** * Get the task attempt dir. * May be null. * @return a path or null. */ private Path getTaskAttemptDir() { return taskAttemptDir; }
3.68
hadoop_TaskInfo_getResourceUsageMetrics
/** * @return Resource usage metrics */ public ResourceUsageMetrics getResourceUsageMetrics() { return metrics; }
3.68
hmily_HmilyRepositoryFacade_writeHmilyLocks
/** * Write hmily locks. * * @param locks locks */ public void writeHmilyLocks(final Collection<HmilyLock> locks) { int count = hmilyRepository.writeHmilyLocks(locks); if (count != locks.size()) { HmilyLock lock = locks.iterator().next(); throw new HmilyLockConflictException(String.format("current record [%s] has been locked by transaction:[%s]", lock.getLockId(), lock.getTransId())); } }
3.68
streampipes_SpGeometryBuilder_isInWGSCoordinateRange
/** * Checks whether a value is within the WGS coordinate range. * * @param valueToCheck Any value * @param min Min value to check * @param max Max value to check * @return true if the value is in the min/max range */ private static boolean isInWGSCoordinateRange(double valueToCheck, double min, double max) { return valueToCheck > min && valueToCheck < max; }
3.68
hadoop_CredentialProviderListFactory_loadAWSProviderClasses
/** * Load list of AWS credential provider/credential provider factory classes. * @param conf configuration * @param key key * @param defaultValue list of default values * @return the list of classes, empty if the default list is empty and * there was no match for the key in the configuration. * @throws IOException on a failure to load the list. */ private static Collection<String> loadAWSProviderClasses(Configuration conf, String key, Class<?>... defaultValue) throws IOException { final Collection<String> classnames = conf.getTrimmedStringCollection(key); if (classnames.isEmpty()) { // empty list; return the defaults return Arrays.stream(defaultValue).map(c -> c.getName()).collect(Collectors.toList()); } else { return classnames; } }
3.68
hbase_StorageClusterStatusModel_setRootIndexSizeKB
/** * @param rootIndexSizeKB The current total size of root-level indexes for the region, in KB */ public void setRootIndexSizeKB(int rootIndexSizeKB) { this.rootIndexSizeKB = rootIndexSizeKB; }
3.68
framework_AbstractComponent_getState
/** * Returns the shared state bean with information to be sent from the server * to the client. * * Subclasses should override this method and set any relevant fields of the * state returned by super.getState(). * * @since 7.0 * * @return updated component shared state */ @Override protected AbstractComponentState getState() { return (AbstractComponentState) super.getState(); }
3.68
flink_BinarySegmentUtils_find
/** * Find equal segments2 in segments1. * * @param segments1 segs to find. * @param segments2 sub segs. * @return The found offset, or -1 if not found. */ public static int find( MemorySegment[] segments1, int offset1, int numBytes1, MemorySegment[] segments2, int offset2, int numBytes2) { if (numBytes2 == 0) { // quick way 1. return offset1; } if (inFirstSegment(segments1, offset1, numBytes1) && inFirstSegment(segments2, offset2, numBytes2)) { byte first = segments2[0].get(offset2); int end = numBytes1 - numBytes2 + offset1; for (int i = offset1; i <= end; i++) { // quick way 2: equal first byte. if (segments1[0].get(i) == first && segments1[0].equalTo(segments2[0], i, offset2, numBytes2)) { return i; } } return -1; } else { return findInMultiSegments( segments1, offset1, numBytes1, segments2, offset2, numBytes2); } }
3.68
flink_TaskSlot_markActive
/** * Mark this slot as active. A slot can only be marked active if it's in state allocated. * * <p>The method returns true if the slot was set to active. Otherwise it returns false. * * @return True if the new state of the slot is active; otherwise false */ public boolean markActive() { if (TaskSlotState.ALLOCATED == state || TaskSlotState.ACTIVE == state) { state = TaskSlotState.ACTIVE; return true; } else { return false; } }
3.68
framework_AbstractRemoteDataSource_insertRowData
/** * Informs this data source that new data has been inserted from the server. * * @param firstRowIndex * the destination index of the new row data * @param count * the number of rows inserted */ protected void insertRowData(int firstRowIndex, int count) { Profiler.enter("AbstractRemoteDataSource.insertRowData"); // Cache was not filled since previous insertRowData. The old rows are // no longer useful. if (invalidatedRows != null) { invalidatedRows.clear(); } size += count; if (firstRowIndex <= cached.getStart()) { Range oldCached = cached; cached = cached.offsetBy(count); for (int i = 1; i <= cached.length(); i++) { int oldIndex = oldCached.getEnd() - i; int newIndex = cached.getEnd() - i; moveRowFromIndexToIndex(oldIndex, newIndex); } } else if (cached.contains(firstRowIndex)) { int oldCacheEnd = cached.getEnd(); Range[] splitOldCache = cached.splitAt(firstRowIndex); cached = splitOldCache[0]; Range invalidated = splitOldCache[1]; if (trackInvalidatedRows) { /* * We need to invalidate the cache from the inserted row * onwards, since the cache wants to be a contiguous range. It * doesn't support holes. * * If holes were supported, we could shift the higher part of * "cached" and leave a hole the size of "count" in the middle. */ trackInvalidatedRowsFromCache(invalidated, count); } for (int i = firstRowIndex; i < oldCacheEnd; i++) { T row = indexToRowMap.remove(Integer.valueOf(i)); keyToIndexMap.remove(getRowKey(row)); } } getHandlers().forEach(dch -> dch.dataAdded(firstRowIndex, count)); ensureCoverageCheck(); Profiler.leave("AbstractRemoteDataSource.insertRowData"); }
3.68
flink_TypeTransformations_toNullable
/** * Returns a type transformation that transforms data type to nullable data type but keeps other * information unchanged. */ public static TypeTransformation toNullable() { return DataType::nullable; }
3.68
querydsl_AbstractCollQuery_from
/** * Add a query source * * @param <A> type of expression * @param entity Path for the source * @param col content of the source * @return current object */ public <A> Q from(Path<A> entity, Iterable<? extends A> col) { iterables.put(entity, col); getMetadata().addJoin(JoinType.DEFAULT, entity); return queryMixin.getSelf(); }
3.68
hadoop_RouterFedBalance_continueJob
/** * Recover and continue the unfinished jobs. */ private int continueJob() throws InterruptedException { BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(getConf()); try { scheduler.init(true); while (true) { Collection<BalanceJob> jobs = scheduler.getAllJobs(); int unfinished = 0; for (BalanceJob job : jobs) { if (!job.isJobDone()) { unfinished++; } LOG.info(job.toString()); } if (unfinished == 0) { break; } Thread.sleep(TimeUnit.SECONDS.toMillis(10)); } } catch (IOException e) { LOG.error("Continue balance job failed.", e); return -1; } finally { scheduler.shutDown(); } return 0; }
3.68
pulsar_MultiRolesTokenAuthorizationProvider_canProduceAsync
/** * Check if the specified role has permission to send messages to the specified fully qualified topic name. * * @param topicName the fully qualified topic name associated with the topic. * @param role the app id used to send messages to the topic. */ @Override public CompletableFuture<Boolean> canProduceAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { return authorize(role, authenticationData, r -> super.canProduceAsync(topicName, r, authenticationData)); }
3.68
hudi_HoodieTable_getIndexingMetadataWriter
/** * Gets the metadata writer for async indexer. * * @param triggeringInstantTimestamp The instant that is triggering this metadata write. * @return An instance of {@link HoodieTableMetadataWriter}. */ public Option<HoodieTableMetadataWriter> getIndexingMetadataWriter(String triggeringInstantTimestamp) { return getMetadataWriter(triggeringInstantTimestamp, LAZY); }
3.68
hadoop_S3LogParser_e
/** * Simple entry using the {@link #SIMPLE} pattern. * @param name name of the element (for code clarity only) * @return the pattern for the regexp */ private static String e(String name) { return e(name, SIMPLE); }
3.68
hbase_ServerName_getVersionedBytes
/** * Return {@link #getServerName()} as bytes with a short-sized prefix with the {@link #VERSION} of * this class. */ public synchronized byte[] getVersionedBytes() { if (this.bytes == null) { this.bytes = Bytes.add(VERSION_BYTES, Bytes.toBytes(getServerName())); } return this.bytes; }
3.68
framework_Table_addGeneratedColumn
/** * Adds a generated column to the Table. * <p> * A generated column is a column that exists only in the Table, not as a * property in the underlying Container. It shows up just as a regular * column. * </p> * <p> * A generated column will override a property with the same id, so that the * generated column is shown instead of the column representing the * property. Note that getContainerProperty() will still get the real * property. * </p> * <p> * Table will not listen to value change events from properties overridden * by generated columns. If the content of your generated column depends on * properties that are not directly visible in the table, attach value * change listener to update the content on all depended properties. * Otherwise your UI might not get updated as expected. * </p> * <p> * Also note that getVisibleColumns() will return the generated columns, * while getContainerPropertyIds() will not. * </p> * * @param id * the id of the column to be added * @param generatedColumn * the {@link ColumnGenerator} to use for this column */ public void addGeneratedColumn(Object id, ColumnGenerator generatedColumn) { if (generatedColumn == null) { throw new IllegalArgumentException( "Can not add null as a GeneratedColumn"); } if (columnGenerators.containsKey(id)) { throw new IllegalArgumentException( "Can not add the same GeneratedColumn twice, id:" + id); } else { columnGenerators.put(id, generatedColumn); /* * add to visible column list unless already there (overriding * column from DS) */ if (!visibleColumns.contains(id)) { visibleColumns.add(id); } refreshRowCache(); } }
3.68
flink_FieldReferenceLookup_getInputFields
/** * Gives matching fields of underlying inputs in order of those inputs and order of fields * within input. * * @return concatenated list of matching fields of all inputs. */ public List<FieldReferenceExpression> getInputFields( List<ColumnExpansionStrategy> expansionStrategies) { return fieldReferences.stream() .flatMap(input -> input.values().stream()) .filter(fieldRef -> includeExpandedColumn(fieldRef.column, expansionStrategies)) .map(FieldReference::toExpr) .collect(toList()); }
3.68
shardingsphere-elasticjob_GuaranteeService_clearAllStartedInfo
/** * Clear all started job's info. */ public void clearAllStartedInfo() { jobNodeStorage.removeJobNodeIfExisted(GuaranteeNode.STARTED_ROOT); }
3.68
pulsar_Record_getRecordSequence
/** * Retrieves the sequence of the record from a source partition. * * @return Sequence Id associated with the record */ default Optional<Long> getRecordSequence() { return Optional.empty(); }
3.68
hbase_Chunk_getData
/** Returns This chunk's backing data. */ ByteBuffer getData() { return this.data; }
3.68
morf_SchemaHomology_checkTable
/** * @param table1 First table to compare. * @param table2 Second table to compare. */ private void checkTable(Table table1, Table table2) { matches("Table name", table1.getName().toUpperCase(), table2.getName().toUpperCase()); checkColumns(table1.getName(), table1.columns(), table2.columns()); checkIndexes(table1.getName(), table1.indexes(), table2.indexes()); checkPrimaryKeys(table1.getName(), upperCaseNamesOfColumns(primaryKeysForTable(table1)), upperCaseNamesOfColumns(primaryKeysForTable(table2))); }
3.68
flink_FlinkDatabaseMetaData_getSchemas
// TODO Flink will support SHOW DATABASES LIKE statement in FLIP-297, this method will be // supported after that issue. @Override public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { throw new UnsupportedOperationException(); }
3.68
open-banking-gateway_FintechConsentAccessImpl_createAnonymousConsentNotPersist
/** * Creates a consent template, but does not persist it. */ public ProtocolFacingConsent createAnonymousConsentNotPersist() { return anonymousPsuConsentAccess.createDoNotPersist(); }
3.68
framework_FieldGroup_discard
/** * Discards all changes done to the bound fields. * <p> * Only has effect if buffered mode is used. * */ public void discard() { for (Field<?> f : fieldToPropertyId.keySet()) { try { f.discard(); } catch (Exception e) { // TODO: handle exception // What can we do if discard fails other than try to discard all // other fields? } } }
3.68
flink_ExecutionJobVertex_cancelWithFuture
/** * Cancels all currently running vertex executions. * * @return A future that is complete once all tasks have canceled. */ public CompletableFuture<Void> cancelWithFuture() { return FutureUtils.waitForAll(mapExecutionVertices(ExecutionVertex::cancel)); }
3.68
pulsar_AuthenticationDataProvider_hasDataFromCommand
/** * Check if data from Pulsar protocol are available. * * @return true if this authentication data contain data from Pulsar protocol */ default boolean hasDataFromCommand() { return false; }
3.68
hbase_StorageClusterStatusModel_setTotalCompactingKVs
/** * @param totalCompactingKVs The total compacting key values in currently running compaction */ public void setTotalCompactingKVs(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; }
3.68
hadoop_TimelineReaderWebServicesUtils_parseStr
/** * Trims the passed string if it's not null. * @param str Passed string. * @return trimmed string if the string is not null, null otherwise. */ static String parseStr(String str) { return StringUtils.trimToNull(str); }
3.68
hadoop_TFile_end
/** * Get the end location of the TFile. * * @return The location right after the last key-value pair in TFile. */ Location end() { return end; }
3.68
cron-utils_FieldConstraintsBuilder_withValidRange
/** * Allows setting a range of valid values for the field. * * @param startRange - start range value * @param endRange - end range value * @return same FieldConstraintsBuilder instance */ public FieldConstraintsBuilder withValidRange(final int startRange, final int endRange) { this.startRange = startRange; this.endRange = endRange; return this; }
3.68
hbase_DefaultMemStore_getNextRow
/** * @param cell Find the row that comes after this one. If null, we return the first. * @return Next row or null if none found. */ Cell getNextRow(final Cell cell) { return getLowest(getNextRow(cell, this.getActive().getCellSet()), getNextRow(cell, this.snapshot.getCellSet())); }
3.68
hbase_BitComparator_getOperator
/** Returns the bitwise operator */ public BitwiseOp getOperator() { return bitOperator; }
3.68
framework_AbstractSelect_isEmpty
/** * For multi-selectable fields, also an empty collection of values is * considered to be an empty field. * * @see LegacyAbstractField#isEmpty(). */ @Override public boolean isEmpty() { if (!multiSelect) { return super.isEmpty(); } else { Object value = getValue(); return super.isEmpty() || (value instanceof Collection && ((Collection<?>) value).isEmpty()); } }
3.68
hbase_TableName_isLegalFullyQualifiedTableName
/** * Check that the passed byte array, "tableName", is a legal user-space table name. * @return Returns the passed <code>tableName</code> param * @throws IllegalArgumentException if the passed tableName is null or is made of other than 'word' * characters or underscores: i.e. * <code>[\p{IsAlphabetic}\p{Digit}.-:]</code>. The ':' is used * to delimit the namespace from the table name and can be used * for nothing else. Namespace names can only contain 'word' * characters <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_' * Qualifier names can only contain 'word' characters * <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_', '.' or '-'. * The name may not start with '.' or '-'. Valid fully qualified * table names: foo:bar, namespace=&gt;foo, table=&gt;bar * org:foo.bar, namespace=org, table=&gt;foo.bar */ public static byte[] isLegalFullyQualifiedTableName(final byte[] tableName) { if (tableName == null || tableName.length <= 0) { throw new IllegalArgumentException("Name is null or empty"); } int namespaceDelimIndex = ArrayUtils.lastIndexOf(tableName, (byte) NAMESPACE_DELIM); if (namespaceDelimIndex < 0) { isLegalTableQualifierName(tableName); } else { isLegalNamespaceName(tableName, 0, namespaceDelimIndex); isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length); } return tableName; }
3.68
hbase_VersionModel_getOSVersion
/** Returns the OS name, version, and hardware architecture */ @XmlAttribute(name = "OS") public String getOSVersion() { return osVersion; }
3.68
hudi_PartitionAwareClusteringPlanStrategy_buildClusteringGroupsForPartition
/** * Create clustering groups based on files eligible for clustering in the partition. */ protected Stream<HoodieClusteringGroup> buildClusteringGroupsForPartition(String partitionPath, List<FileSlice> fileSlices) { HoodieWriteConfig writeConfig = getWriteConfig(); List<Pair<List<FileSlice>, Integer>> fileSliceGroups = new ArrayList<>(); List<FileSlice> currentGroup = new ArrayList<>(); // Sort fileSlices before dividing, which makes dividing more compact List<FileSlice> sortedFileSlices = new ArrayList<>(fileSlices); sortedFileSlices.sort((o1, o2) -> (int) ((o2.getBaseFile().isPresent() ? o2.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize()) - (o1.getBaseFile().isPresent() ? o1.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize()))); long totalSizeSoFar = 0; for (FileSlice currentSlice : sortedFileSlices) { long currentSize = currentSlice.getBaseFile().isPresent() ? currentSlice.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize(); // check if max size is reached and create new group, if needed. if (totalSizeSoFar + currentSize > writeConfig.getClusteringMaxBytesInGroup() && !currentGroup.isEmpty()) { int numOutputGroups = getNumberOfOutputFileGroups(totalSizeSoFar, writeConfig.getClusteringTargetFileMaxBytes()); LOG.info("Adding one clustering group " + totalSizeSoFar + " max bytes: " + writeConfig.getClusteringMaxBytesInGroup() + " num input slices: " + currentGroup.size() + " output groups: " + numOutputGroups); fileSliceGroups.add(Pair.of(currentGroup, numOutputGroups)); currentGroup = new ArrayList<>(); totalSizeSoFar = 0; // if fileSliceGroups has reached the max number of groups, stop the loop if (fileSliceGroups.size() >= writeConfig.getClusteringMaxNumGroups()) { LOG.info("Having generated the maximum number of groups : " + writeConfig.getClusteringMaxNumGroups()); break; } } // Add to the current file-group currentGroup.add(currentSlice); // assume each file group size is ~= parquet.max.file.size totalSizeSoFar += currentSize; } if (!currentGroup.isEmpty()) { if (currentGroup.size() > 1 || writeConfig.shouldClusteringSingleGroup()) { int numOutputGroups = getNumberOfOutputFileGroups(totalSizeSoFar, writeConfig.getClusteringTargetFileMaxBytes()); LOG.info("Adding final clustering group " + totalSizeSoFar + " max bytes: " + writeConfig.getClusteringMaxBytesInGroup() + " num input slices: " + currentGroup.size() + " output groups: " + numOutputGroups); fileSliceGroups.add(Pair.of(currentGroup, numOutputGroups)); } } return fileSliceGroups.stream().map(fileSliceGroup -> HoodieClusteringGroup.newBuilder() .setSlices(getFileSliceInfo(fileSliceGroup.getLeft())) .setNumOutputFileGroups(fileSliceGroup.getRight()) .setMetrics(buildMetrics(fileSliceGroup.getLeft())) .build()); }
3.68
hadoop_TimelineEntityGroupId_getTimelineEntityGroupId
/** * Get the <code>timelineEntityGroupId</code>. * * @return <code>timelineEntityGroupId</code> */ public String getTimelineEntityGroupId() { return this.id; }
3.68
hbase_FileIOEngine_read
/** * Transfers data from the file to the given byte buffer. * @param be a {@link BucketEntry} which maintains an (offset, len, refCnt) * @return the {@link Cacheable} with block data inside. * @throws IOException if any IO error happens. */ @Override public Cacheable read(BucketEntry be) throws IOException { long offset = be.offset(); int length = be.getLength(); Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0."); ByteBuff dstBuff = be.allocator.allocate(length); if (length != 0) { try { accessFile(readAccessor, dstBuff, offset); // The buffer created out of the fileChannel is formed by copying the data from the file // Hence in this case there is no shared memory that we point to. Even if the BucketCache // evicts this buffer from the file the data is already copied and there is no need to // ensure that the results are not corrupted before consuming them. if (dstBuff.limit() != length) { throw new IllegalArgumentIOException( "Only " + dstBuff.limit() + " bytes read, " + length + " expected"); } } catch (IOException ioe) { dstBuff.release(); throw ioe; } } if (maintainPersistence) { dstBuff.rewind(); long cachedNanoTime = dstBuff.getLong(); if (be.getCachedTime() != cachedNanoTime) { dstBuff.release(); throw new HBaseIOException("The cached time recorded within the cached block: " + cachedNanoTime + " differs from its bucket entry: " + be.getCachedTime()); } dstBuff.limit(length); dstBuff = dstBuff.slice(); } else { dstBuff.rewind(); } return be.wrapAsCacheable(dstBuff); }
3.68
pulsar_PulsarClientImplementationBindingImpl_convertKeyValueSchemaInfoDataToString
/** * Convert the key/value schema data. * * @param kvSchemaInfo the key/value schema info * @return the converted key/value schema data as a string */ public String convertKeyValueSchemaInfoDataToString(KeyValue<SchemaInfo, SchemaInfo> kvSchemaInfo) throws IOException { return SchemaUtils.convertKeyValueSchemaInfoDataToString(kvSchemaInfo); }
3.68
framework_VaadinService_setDefaultClassLoader
/** * Tries to acquire the default class loader and sets it as the class loader for * this {@link VaadinService} if found. If the current security policy disallows * acquiring a class loader instance, it will log a message and re-throw the * {@link SecurityException} * * @throws SecurityException * If the current security policy forbids acquiring a class loader * * @since 7.3.5 */ protected void setDefaultClassLoader() { try { setClassLoader( VaadinServiceClassLoaderUtil.findDefaultClassLoader()); } catch (SecurityException e) { getLogger().log(Level.SEVERE, Constants.CANNOT_ACQUIRE_CLASSLOADER_SEVERE, e); throw e; } }
3.68
framework_DesignContext_getPackagePrefixes
/** * Gets all registered package prefixes. * * * @since 7.5.0 * @see #getPackage(String) * @return a collection of package prefixes */ public Collection<String> getPackagePrefixes() { return Collections.unmodifiableCollection(prefixToPackage.keySet()); }
3.68
framework_VCalendarPanel_getSubmitListener
/** * Returns the submit listener that listens to selection made from the * panel. * * @return The listener or NULL if no listener has been set */ public SubmitListener getSubmitListener() { return submitListener; }
3.68
dubbo_ServiceModel_getServiceConfig
/** * ServiceModel should be decoupled from AbstractInterfaceConfig and removed in a future version. * @return the underlying service config, or null if none is set */ @Deprecated public ServiceConfigBase<?> getServiceConfig() { if (config == null) { return null; } if (config instanceof ServiceConfigBase) { return (ServiceConfigBase<?>) config; } else { throw new IllegalArgumentException("Current ServiceModel is not a ProviderModel"); } }
3.68
flink_SinkFunction_invoke
/** * Writes the given value to the sink. This function is called for every record. * * <p>You have to override this method when implementing a {@code SinkFunction}, this is a * {@code default} method for backward compatibility with the old-style method only. * * @param value The input record. * @param context Additional context about the input record. * @throws Exception This method may throw exceptions. Throwing an exception will cause the * operation to fail and may trigger recovery. */ default void invoke(IN value, Context context) throws Exception { invoke(value); }
3.68
hudi_AbstractStreamWriteFunction_invalidInstant
/** * Returns whether the pending instant is invalid to write with. */ private boolean invalidInstant(String instant, boolean hasData) { return instant.equals(this.currentInstant) && hasData; }
3.68
hbase_HFileBlockDefaultEncodingContext_close
/** * Releases the compressor this writer uses to compress blocks into the compressor pool. */ @Override public void close() { if (compressor != null) { this.fileContext.getCompression().returnCompressor(compressor); compressor = null; } }
3.68
hadoop_ApplicationServiceRecordProcessor_init
/** * Initializes the descriptor parameters. * * @param serviceRecord the service record. */ @Override protected void init(ServiceRecord serviceRecord) throws Exception { super.init(serviceRecord); if (getTarget() == null) { return; } try { this.setTarget(getIpv6Address(getTarget())); } catch (UnknownHostException e) { throw new IllegalStateException(e); } }
3.68
AreaShop_BuyRegion_buy
/** * Buy a region. * @param offlinePlayer The player that wants to buy the region * @return true if it succeeded and false if not */ @SuppressWarnings("deprecation") public boolean buy(OfflinePlayer offlinePlayer) { // Check if the player has permission if(!plugin.hasPermission(offlinePlayer, "areashop.buy")) { message(offlinePlayer, "buy-noPermission"); return false; } if(plugin.getEconomy() == null) { message(offlinePlayer, "general-noEconomy"); return false; } if(isInResellingMode()) { if(!plugin.hasPermission(offlinePlayer, "areashop.buyresell")) { message(offlinePlayer, "buy-noPermissionResell"); return false; } } else { if(!plugin.hasPermission(offlinePlayer, "areashop.buynormal")) { message(offlinePlayer, "buy-noPermissionNoResell"); return false; } } if(getWorld() == null) { message(offlinePlayer, "general-noWorld"); return false; } if(getRegion() == null) { message(offlinePlayer, "general-noRegion"); return false; } if (isSold() && !(isInResellingMode() && !isBuyer(offlinePlayer))) { if(isBuyer(offlinePlayer)) { message(offlinePlayer, "buy-yours"); } else { message(offlinePlayer, "buy-someoneElse"); } return false; } boolean isResell = isInResellingMode(); // Only relevant if the player is online Player player = offlinePlayer.getPlayer(); if(player != null) { // Check if the player needs to be in the region for buying if(restrictedToRegion() && (!player.getWorld().getName().equals(getWorldName()) || !getRegion().contains(player.getLocation().getBlockX(), player.getLocation().getBlockY(), player.getLocation().getBlockZ()))) { message(offlinePlayer, "buy-restrictedToRegion"); return false; } // Check if the player needs to be in the world for buying if(restrictedToWorld() && !player.getWorld().getName().equals(getWorldName())) { message(offlinePlayer, "buy-restrictedToWorld", player.getWorld().getName()); return false; } } // Check region limits LimitResult limitResult = this.limitsAllow(RegionType.BUY, offlinePlayer); AreaShop.debug("LimitResult: " + limitResult.toString()); if(!limitResult.actionAllowed()) { if(limitResult.getLimitingFactor() == LimitType.TOTAL) { message(offlinePlayer, "total-maximum", limitResult.getMaximum(), limitResult.getCurrent(), limitResult.getLimitingGroup()); return false; } if(limitResult.getLimitingFactor() == LimitType.BUYS) { message(offlinePlayer, "buy-maximum", limitResult.getMaximum(), limitResult.getCurrent(), limitResult.getLimitingGroup()); return false; } // Should not be reached, but is safe like this return false; } // Check if the player has enough money if (isResell && !plugin.getEconomy().has(offlinePlayer, getWorldName(), getResellPrice())) { message(offlinePlayer, "buy-lowMoneyResell", Utils.formatCurrency(plugin.getEconomy().getBalance(offlinePlayer, getWorldName()))); return false; } if (!isResell && !plugin.getEconomy().has(offlinePlayer, getWorldName(), getPrice())) { message(offlinePlayer, "buy-lowMoney", Utils.formatCurrency(plugin.getEconomy().getBalance(offlinePlayer, getWorldName()))); return false; } UUID oldOwner = getBuyer(); if(isResell && oldOwner != null) { // Broadcast and check event ResellingRegionEvent event = new ResellingRegionEvent(this, offlinePlayer); Bukkit.getPluginManager().callEvent(event); if(event.isCancelled()) { message(offlinePlayer, "general-cancelled", event.getReason()); return false; } getFriendsFeature().clearFriends(); double resellPrice = getResellPrice(); // Transfer the money to the previous owner EconomyResponse r = plugin.getEconomy().withdrawPlayer(offlinePlayer, getWorldName(), getResellPrice()); if(!r.transactionSuccess()) { message(offlinePlayer, "buy-payError"); AreaShop.debug("Something went wrong with getting money from " + offlinePlayer.getName() + " while buying " + getName() + ": " + r.errorMessage); return false; } r = null; OfflinePlayer oldOwnerPlayer = Bukkit.getOfflinePlayer(oldOwner); String oldOwnerName = getPlayerName(); if(oldOwnerPlayer != null && oldOwnerPlayer.getName() != null) { r = plugin.getEconomy().depositPlayer(oldOwnerPlayer, getWorldName(), getResellPrice()); oldOwnerName = oldOwnerPlayer.getName(); } else if(oldOwnerName != null) { r = plugin.getEconomy().depositPlayer(oldOwnerName, getWorldName(), getResellPrice()); } if(r == null || !r.transactionSuccess()) { AreaShop.warn("Something went wrong with paying '" + oldOwnerName + "' " + getFormattedPrice() + " for his resell of region " + getName() + " to " + offlinePlayer.getName()); } // Resell is done, disable that now disableReselling(); // Set the owner setBuyer(offlinePlayer.getUniqueId()); updateLastActiveTime(); // Update everything handleSchematicEvent(RegionEvent.RESELL); // Notify about updates this.notifyAndUpdate(new ResoldRegionEvent(this, oldOwner)); // Send message to the player message(offlinePlayer, "buy-successResale", oldOwnerName); Player seller = Bukkit.getPlayer(oldOwner); if(seller != null) { message(seller, "buy-successSeller", resellPrice); } } else { // Broadcast and check event BuyingRegionEvent event = new BuyingRegionEvent(this, offlinePlayer); Bukkit.getPluginManager().callEvent(event); if(event.isCancelled()) { message(offlinePlayer, "general-cancelled", event.getReason()); return false; } // Subtract the money from the player's balance EconomyResponse r = plugin.getEconomy().withdrawPlayer(offlinePlayer, getWorldName(), getPrice()); if(!r.transactionSuccess()) { message(offlinePlayer, "buy-payError"); return false; } // Optionally give money to the landlord OfflinePlayer landlordPlayer = null; if(getLandlord() != null) { landlordPlayer = Bukkit.getOfflinePlayer(getLandlord()); } String landlordName = getLandlordName(); if(landlordName != null) { if(landlordPlayer != null && landlordPlayer.getName() != null) { r = plugin.getEconomy().depositPlayer(landlordPlayer, getWorldName(), getPrice()); } else { r = plugin.getEconomy().depositPlayer(landlordName, getWorldName(), getPrice()); } if(r != null && !r.transactionSuccess()) { AreaShop.warn("Something went wrong with paying '" + landlordName + "' " + getFormattedPrice() + " for his sell of region " + getName() + " to " + offlinePlayer.getName()); } } // Set the owner setBuyer(offlinePlayer.getUniqueId()); updateLastActiveTime(); // Send message to the player message(offlinePlayer, "buy-succes"); // Update everything handleSchematicEvent(RegionEvent.BOUGHT); // Notify about updates this.notifyAndUpdate(new BoughtRegionEvent(this)); } return true; }
3.68
flink_MetricQueryService_replaceInvalidChars
/** * Lightweight method to replace unsupported characters. If the string does not contain any * unsupported characters, this method creates no new string (and in fact no new objects at * all). * * <p>Replacements: * * <ul> * <li>{@code space : . ,} are replaced by {@code _} (underscore) * </ul> */ private static String replaceInvalidChars(String str) { char[] chars = null; final int strLen = str.length(); int pos = 0; for (int i = 0; i < strLen; i++) { final char c = str.charAt(i); switch (c) { case ' ': case '.': case ':': case ',': if (chars == null) { chars = str.toCharArray(); } chars[pos++] = '_'; break; default: if (chars != null) { chars[pos] = c; } pos++; } } return chars == null ? str : new String(chars, 0, pos); }
3.68
framework_ConnectorTracker_markConnectorsDirtyRecursively
/** * Marks all visible connectors dirty, starting from the given connector and * going downwards in the hierarchy. * * @param c * The component to start iterating downwards from */ private void markConnectorsDirtyRecursively(ClientConnector c) { if (c instanceof Component && !((Component) c).isVisible()) { return; } markDirty(c); for (ClientConnector child : AbstractClientConnector .getAllChildrenIterable(c)) { markConnectorsDirtyRecursively(child); } }
3.68
dubbo_URLParam_removeParameters
/** * remove specified parameters in URLParam * * @param keys keys to being removed * @return A new URLParam */ public URLParam removeParameters(String... keys) { if (keys == null || keys.length == 0) { return this; } // lazy init, null if no modify BitSet newKey = null; int[] newValueArray = null; Map<String, String> newExtraParams = null; Map<String, Map<String, String>> newMethodParams = null; for (String key : keys) { int keyIndex = DynamicParamTable.getKeyIndex(enableCompressed, key); if (keyIndex >= 0 && KEY.get(keyIndex)) { if (newKey == null) { newKey = (BitSet) KEY.clone(); } newKey.clear(keyIndex); // which offset is in VALUE array, set value as -1, compress in the end if (newValueArray == null) { newValueArray = new int[VALUE.length]; System.arraycopy(VALUE, 0, newValueArray, 0, VALUE.length); } // KEY is immutable newValueArray[keyIndexToIndex(KEY, keyIndex)] = -1; } if (EXTRA_PARAMS.containsKey(key)) { if (newExtraParams == null) { newExtraParams = new HashMap<>(EXTRA_PARAMS); } newExtraParams.remove(key); String[] methodSplit = key.split("\\."); if (methodSplit.length == 2) { if (newMethodParams == null) { newMethodParams = new HashMap<>(METHOD_PARAMETERS); } Map<String, String> methodMap = newMethodParams.get(methodSplit[1]); if (CollectionUtils.isNotEmptyMap(methodMap)) { methodMap.remove(methodSplit[0]); } } } // ignore if key is absent } if (newKey == null) { newKey = KEY; } if (newValueArray == null) { newValueArray = VALUE; } else { // remove -1 value newValueArray = compressArray(newValueArray); } if (newExtraParams == null) { newExtraParams = EXTRA_PARAMS; } if (newMethodParams == null) { newMethodParams = METHOD_PARAMETERS; } if (newKey.cardinality() + newExtraParams.size() == 0) { // empty, directly return cache return EMPTY_PARAM; } else { return new URLParam(newKey, newValueArray, newExtraParams, newMethodParams, null); } }
3.68
framework_BasicEventMoveHandler_setDates
/** * Set the start and end dates for the event. * * @param event * The event for which the start and end dates should be set * @param start * The start date * @param end * The end date */ protected void setDates(EditableCalendarEvent event, Date start, Date end) { event.setStart(start); event.setEnd(end); }
3.68
shardingsphere-elasticjob_FailoverService_getFailoveringItems
/** * Get failovering items. * * @param jobInstanceId job instance ID * @return failovering items */ public List<Integer> getFailoveringItems(final String jobInstanceId) { List<String> items = jobNodeStorage.getJobNodeChildrenKeys(ShardingNode.ROOT); List<Integer> result = new ArrayList<>(items.size()); for (String each : items) { int item = Integer.parseInt(each); String node = FailoverNode.getExecutingFailoverNode(item); if (jobNodeStorage.isJobNodeExisted(node) && jobInstanceId.equals(jobNodeStorage.getJobNodeDataDirectly(node))) { result.add(item); } } Collections.sort(result); return result; }
3.68
flink_FlinkRelMetadataQuery_getUpsertKeys
/** * Determines the set of upsert minimal keys for this expression. A key is represented as an * {@link org.apache.calcite.util.ImmutableBitSet}, where each bit position represents a 0-based * output column ordinal. * * <p>Different from the unique keys: In distributed streaming computing, one record may be * divided into RowKind.UPDATE_BEFORE and RowKind.UPDATE_AFTER. If a key changing join is * connected downstream, the two records will be divided into different tasks, resulting in * disorder. In this case, the downstream cannot rely on the order of the original key. So in * this case, it has unique keys in the traditional sense, but it doesn't have upsert keys. * * @return set of keys, or null if this information cannot be determined (whereas empty set * indicates definitely no keys at all) */ public Set<ImmutableBitSet> getUpsertKeys(RelNode rel) { for (; ; ) { try { return upsertKeysHandler.getUpsertKeys(rel, this); } catch (JaninoRelMetadataProvider.NoHandler e) { upsertKeysHandler = revise(e.relClass, FlinkMetadata.UpsertKeys.DEF); } } }
3.68
framework_AbstractComponentConnector_updateWidgetStyleNames
/** * Updates the user defined, read-only and error style names for the widget * based the shared state. User defined style names are prefixed with the * primary style name of the widget returned by {@link #getWidget()} * <p> * This method can be overridden to provide additional style names for the * component, for example see {@code AbstractFieldConnector} * </p> */ protected void updateWidgetStyleNames() { Profiler.enter("AbstractComponentConnector.updateWidgetStyleNames"); AbstractComponentState state = getState(); String primaryStyleName = getWidget().getStylePrimaryName(); // Set the core 'v' style name for the widget setWidgetStyleName(StyleConstants.UI_WIDGET, true); // add / remove error style name setWidgetStyleNameWithPrefix(primaryStyleName, StyleConstants.ERROR_EXT, null != state.errorMessage); // add additional user defined style names as class names, prefixed with // component default class name. remove nonexistent style names. // Remove all old stylenames for (int i = 0; i < styleNames.length(); i++) { String oldStyle = styleNames.get(i); setWidgetStyleName(oldStyle, false); setWidgetStyleNameWithPrefix(primaryStyleName + "-", oldStyle, false); } styleNames.setLength(0); if (ComponentStateUtil.hasStyles(state)) { // add new style names for (String newStyle : state.styles) { setWidgetStyleName(newStyle, true); setWidgetStyleNameWithPrefix(primaryStyleName + "-", newStyle, true); styleNames.push(newStyle); } } if (state.primaryStyleName != null && !state.primaryStyleName.equals(primaryStyleName)) { /* * We overwrite the widgets primary stylename if state defines a * primary stylename. This has to be done after updating other * styles to be sure the dependent styles are updated correctly. */ getWidget().setStylePrimaryName(state.primaryStyleName); } // set required style name if components supports that if (this instanceof HasRequiredIndicator) { getWidget().setStyleName(StyleConstants.REQUIRED, ((HasRequiredIndicator) this).isRequiredIndicatorVisible()); } Profiler.leave("AbstractComponentConnector.updateWidgetStyleNames"); }
3.68
graphhopper_RouterConfig_setTimeoutMillis
/** * Limits the runtime of routing requests to the given amount of milliseconds. This only works up to a certain * precision, but should be sufficient to cancel long-running requests in most cases. The exact implementation of * the timeout depends on the routing algorithm. */ public void setTimeoutMillis(long timeoutMillis) { this.timeoutMillis = timeoutMillis; }
3.68
hadoop_FsServerDefaults_getKeyProviderUri
/* null means old style namenode. * "" (empty string) means namenode is upgraded but EZ is not supported. * some string means that value is the key provider. */ public String getKeyProviderUri() { return keyProviderUri; }
3.68
hadoop_OperationDuration_time
/** * Evaluate the system time. * @return the current clock time. */ protected long time() { return System.currentTimeMillis(); }
3.68
framework_Form_isEmpty
/** * {@inheritDoc} * <p> * A Form is empty if all of its fields are empty. * */ @Override public boolean isEmpty() { for (Field<?> f : fields.values()) { if (f instanceof AbstractField) { if (!((AbstractField<?>) f).isEmpty()) { return false; } } } return true; }
3.68
hadoop_BlockManagerParameters_getBufferPoolSize
/** * @return The size of the in-memory cache. */ public int getBufferPoolSize() { return bufferPoolSize; }
3.68
flink_Router_notFound
/** * Sets the fallback target for use when there's no match at {@link #route(HttpMethod, String)}. */ public Router<T> notFound(T target) { this.notFound = target; return this; }
3.68
flink_DefaultBlocklistTracker_tryAddOrMerge
/** * Try to add a new blocked node record. If the node (identified by node id) already exists, the * newly added one will be merged with the existing one. * * @param newNode the new blocked node record * @return the add status */ private AddStatus tryAddOrMerge(BlockedNode newNode) { checkNotNull(newNode); final String nodeId = newNode.getNodeId(); final BlockedNode existingNode = blockedNodes.get(nodeId); if (existingNode == null) { blockedNodes.put(nodeId, newNode); return AddStatus.ADDED; } else { BlockedNode merged = newNode.getEndTimestamp() >= existingNode.getEndTimestamp() ? newNode : existingNode; if (!merged.equals(existingNode)) { blockedNodes.put(nodeId, merged); return AddStatus.MERGED; } return AddStatus.NONE; } }
3.68
hadoop_LocalSASKeyGeneratorImpl_getAccountNameWithoutDomain
/** * Helper method that returns the Storage account name without * the domain name suffix. * @param fullAccountName Storage account name with domain name suffix * @return String */ private String getAccountNameWithoutDomain(String fullAccountName) { StringTokenizer tokenizer = new StringTokenizer(fullAccountName, "."); return tokenizer.nextToken(); }
3.68
flink_ResourceGuard_acquireResource
/** * Acquires access for one new client to the guarded resource. * * @throws IOException when the resource guard is already closed. */ public Lease acquireResource() throws IOException { synchronized (lock) { if (closed) { throw new IOException("Resource guard was already closed."); } ++leaseCount; } return new Lease(); }
3.68
flink_ZooKeeperStateHandleStore_getRootLockPath
/** * Returns the sub-path for lock nodes of the corresponding node (referred to through the passed * {@code rootPath}). The returned sub-path collects the lock nodes for the {@code rootPath}'s * node. The {@code rootPath} is marked for deletion if the sub-path for lock nodes is deleted. */ @VisibleForTesting static String getRootLockPath(String rootPath) { return rootPath + "/locks"; }
3.68
framework_Result_of
/** * Returns a Result representing the result of invoking the given supplier. * If the supplier returns a value, returns a {@code Result.ok} of the * value; if an exception is thrown, returns the message in a * {@code Result.error}. * * @param <R> * the result value type * @param supplier * the supplier to run * @param onError * the function to provide the error message * @return the result of invoking the supplier */ public static <R> Result<R> of(SerializableSupplier<R> supplier, SerializableFunction<Exception, String> onError) { Objects.requireNonNull(supplier, "supplier cannot be null"); Objects.requireNonNull(onError, "onError cannot be null"); try { return ok(supplier.get()); } catch (Exception e) { return error(onError.apply(e)); } }
3.68
hadoop_ConnectionPool_getConnection
/** * Return the next connection round-robin. * * @return Connection context. */ protected ConnectionContext getConnection() { this.lastActiveTime = Time.now(); List<ConnectionContext> tmpConnections = this.connections; for (ConnectionContext tmpConnection : tmpConnections) { if (tmpConnection != null && tmpConnection.isUsable()) { return tmpConnection; } } ConnectionContext conn = null; // We return a connection even if it's busy int size = tmpConnections.size(); if (size > 0) { // Get a connection from the pool following round-robin // Inc and mask off sign bit, lookup index should be non-negative int int threadIndex = this.clientIndex.getAndIncrement() & 0x7FFFFFFF; conn = tmpConnections.get(threadIndex % size); } return conn; }
3.68
hbase_RegionMover_createConf
/** * Creates a new configuration and sets region mover specific overrides */ private static Configuration createConf() { Configuration conf = HBaseConfiguration.create(); conf.setInt("hbase.client.prefetch.limit", 1); conf.setInt("hbase.client.pause", 500); conf.setInt("hbase.client.retries.number", 100); return conf; }
3.68
flink_StreamExecutionEnvironment_getJobListeners
/** Gets the config JobListeners. */ @PublicEvolving public List<JobListener> getJobListeners() { return jobListeners; }
3.68