Dataset columns:
name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (values 3.26 to 3.68)
cron-utils_FieldDefinitionBuilder_withValidRange
/** * Allows setting a range of valid values for the field. * * @param startRange - start range value * @param endRange - end range value * @return same FieldDefinitionBuilder instance */ public FieldDefinitionBuilder withValidRange(final int startRange, final int endRange) { constraints.withValidRange(startRange, endRange); return this; }
3.68
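A minimal usage sketch for the builder above. It assumes cron-utils' fluent CronDefinitionBuilder API, where each field builder is closed with and(); only withValidRange itself is taken from the snippet.

    import com.cronutils.model.definition.CronDefinition;
    import com.cronutils.model.definition.CronDefinitionBuilder;

    public class ValidRangeExample {
        public static void main(String[] args) {
            // Restrict the seconds field to 0..30; withValidRange returns the
            // same FieldDefinitionBuilder, so calls chain fluently.
            CronDefinition def = CronDefinitionBuilder.defineCron()
                    .withSeconds().withValidRange(0, 30).and()
                    .withMinutes().and()
                    .withHours().and()
                    .withDayOfMonth().and()
                    .withMonth().and()
                    .withDayOfWeek().and()
                    .instance();
            System.out.println(def);
        }
    }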
flink_BytesKeyNormalizationUtil_putNormalizedKey
/** * Writes the normalized key of given record. The normalized key consists of the key serialized * as bytes and the timestamp of the record. * * <p>NOTE: The key does not represent a logical order. It can be used only for grouping keys! */ static <IN> void putNormalizedKey( Tuple2<byte[], StreamRecord<IN>> record, int dataLength, MemorySegment target, int offset, int numBytes) { byte[] data = record.f0; if (dataLength >= numBytes) { putBytesArray(target, offset, numBytes, data); } else { // whole key fits into the normalized key putBytesArray(target, offset, dataLength, data); int lastOffset = offset + numBytes; offset += dataLength; long valueOfTimestamp = record.f1.asRecord().getTimestamp() - Long.MIN_VALUE; if (dataLength + TIMESTAMP_BYTE_SIZE <= numBytes) { // whole timestamp fits into the normalized key target.putLong(offset, valueOfTimestamp); offset += TIMESTAMP_BYTE_SIZE; // fill in the remaining space with zeros while (offset < lastOffset) { target.put(offset++, (byte) 0); } } else { // only part of the timestamp fits into normalized key for (int i = 0; offset < lastOffset; offset++, i++) { target.put(offset, (byte) (valueOfTimestamp >>> ((7 - i) << 3))); } } } }
3.68
AreaShop_AreaShop_getCommandManager
/** * Function to get the CommandManager. * @return the CommandManager */ public CommandManager getCommandManager() { return commandManager; }
3.68
hbase_RegionCoprocessorHost_postBulkLoadHFile
/** * @param familyPaths pairs of { CF, file path } submitted for bulk load * @param map Map of CF to List of file paths for the final loaded files */ public void postBulkLoadHFile(final List<Pair<byte[], String>> familyPaths, Map<byte[], List<Path>> map) throws IOException { if (this.coprocEnvironments.isEmpty()) { return; } execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() { @Override public void call(RegionObserver observer) throws IOException { observer.postBulkLoadHFile(this, familyPaths, map); } }); }
3.68
morf_CompositeSchema_tables
/** * @see org.alfasoftware.morf.metadata.Schema#tables() */ @Override public Collection<Table> tables() { Set<Table> result = Sets.newHashSet(); Set<String> seenTables = Sets.newHashSet(); for (Schema schema : delegates) { for (Table table : schema.tables()) { if (seenTables.add(table.getName().toUpperCase())) { result.add(table); } } } return result; }
3.68
pulsar_ProducerConfiguration_setMessageRouter
/** * Set a custom message routing policy by passing an implementation of MessageRouter. * * @param messageRouter the message router implementation, must not be null * @return the ProducerConfiguration instance, for method chaining */ public ProducerConfiguration setMessageRouter(MessageRouter messageRouter) { Objects.requireNonNull(messageRouter); setMessageRoutingMode(MessageRoutingMode.CustomPartition); conf.setCustomMessageRouter(messageRouter); return this; }
3.68
hadoop_JobSummary_add
// A little optimization for a very common case SummaryBuilder add(String key, long value) { return _add(key, Long.toString(value)); }
3.68
flink_BinaryStringData_blankString
/** Creates a {@link BinaryStringData} instance that contains `length` spaces. */ public static BinaryStringData blankString(int length) { byte[] spaces = new byte[length]; Arrays.fill(spaces, (byte) ' '); return fromBytes(spaces); }
3.68
hbase_HRegionServer_getOnlineRegion
/** * @return HRegion for the passed binary <code>regionName</code> or null if named region is not * member of the online regions. */ public HRegion getOnlineRegion(final byte[] regionName) { String encodedRegionName = RegionInfo.encodeRegionName(regionName); return this.onlineRegions.get(encodedRegionName); }
3.68
flink_Deadline_fromNow
/** Constructs a Deadline that is a given duration after now. */ public static Deadline fromNow(Duration duration) { return new Deadline( addHandlingOverflow(System.nanoTime(), duration.toNanos()), SystemClock.getInstance()); }
3.68
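A hedged sketch of how such a deadline is typically consumed in a polling loop; it assumes Deadline also exposes hasTimeLeft(), as Flink's utility class does.

    import java.time.Duration;
    import org.apache.flink.api.common.time.Deadline;

    public class DeadlineExample {
        public static void main(String[] args) throws InterruptedException {
            // Poll a condition for at most five seconds from now.
            Deadline deadline = Deadline.fromNow(Duration.ofSeconds(5));
            while (deadline.hasTimeLeft() && !conditionMet()) {
                Thread.sleep(100); // back off between polls
            }
        }

        private static boolean conditionMet() {
            return false; // placeholder condition for this sketch
        }
    }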
framework_VaadinSession_getCommunicationManager
/** * @return the communication manager of this session * * @deprecated As of 7.0. Will likely change or be removed in a future * version */ @Deprecated public LegacyCommunicationManager getCommunicationManager() { assert hasLock(); return communicationManager; }
3.68
morf_SchemaBean_tableNames
/** * {@inheritDoc} * * @see org.alfasoftware.morf.metadata.Schema#tableNames() */ @Override public Collection<String> tableNames() { // Implemented like this rather than tables.keySet() to retain case Set<String> names = new HashSet<>(); for (Table table : tables.values()) { names.add(table.getName()); } return names; }
3.68
flink_AccumulatorRegistry_getUserMap
/** Gets the map for user-defined accumulators. */ public Map<String, Accumulator<?, ?>> getUserMap() { return userAccumulators; }
3.68
framework_CssLayout_getComponentIndex
/** * Returns the index of the given component. * * @param component * The component to look up. * @return The index of the component or -1 if the component is not a child. */ public int getComponentIndex(Component component) { return components.indexOf(component); }
3.68
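A small usage sketch, assuming Vaadin's standard CssLayout/Button API around the method above.

    import com.vaadin.ui.Button;
    import com.vaadin.ui.CssLayout;

    public class ComponentIndexExample {
        void demo() {
            CssLayout layout = new CssLayout();
            Button first = new Button("first");
            Button second = new Button("second");
            layout.addComponents(first, second);

            int i = layout.getComponentIndex(second);        // 1
            int j = layout.getComponentIndex(new Button());  // -1, not a child
        }
    }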
flink_JsonRowSerializationSchema_withTypeInfo
/** * Sets type information for JSON serialization schema. * * @param typeInfo Type information describing the result type. The field names of {@link * Row} are used to parse the JSON properties. */ public Builder withTypeInfo(TypeInformation<Row> typeInfo) { checkArgument(typeInfo instanceof RowTypeInfo, "Only RowTypeInfo is supported"); this.typeInfo = (RowTypeInfo) typeInfo; return this; }
3.68
framework_Or_appliesToProperty
/** * Returns true if a change in the named property may affect the filtering * result. If some of the sub-filters are not in-memory filters, true is * returned. * * By default, all sub-filters are iterated to check if any of them applies. * If there are no sub-filters, true is returned as an empty Or rejects all * items. */ @Override public boolean appliesToProperty(Object propertyId) { if (getFilters().isEmpty()) { // empty Or filters out everything return true; } else { return super.appliesToProperty(propertyId); } }
3.68
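An illustrative sketch of the documented behaviour; package names are assumed from the Vaadin 7 container filter API.

    import com.vaadin.data.util.filter.Compare;
    import com.vaadin.data.util.filter.Or;

    public class OrFilterExample {
        public static void main(String[] args) {
            Or or = new Or(new Compare.Equal("status", "OPEN"),
                           new Compare.Equal("status", "CLOSED"));
            System.out.println(or.appliesToProperty("status")); // true: a sub-filter targets it
            // An empty Or rejects every item, so any property change may matter:
            System.out.println(new Or().appliesToProperty("anything")); // true
        }
    }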
hadoop_TimelineReaderWebServicesUtils_parseRelationFilters
/** * Parse relation filters. * @param expr Relation filter expression * @return a {@link TimelineFilterList} object. * * @throws TimelineParseException if the filter expression cannot be parsed. */ static TimelineFilterList parseRelationFilters(String expr) throws TimelineParseException { return parseFilters(new TimelineParserForRelationFilters(expr, TimelineParseConstants.COMMA_CHAR, TimelineParseConstants.COLON_DELIMITER)); }
3.68
hbase_CompactSplit_getRegionSplitLimit
/** Returns the regionSplitLimit */ public int getRegionSplitLimit() { return this.regionSplitLimit; }
3.68
flink_JoinOperator_projectTuple25
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public < T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> ProjectJoin< I1, I2, Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> projectTuple25() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo< Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType = new TupleTypeInfo< Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fTypes); return new ProjectJoin< I1, I2, Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hadoop_ConnectionContext_isActiveRecently
/** * Check if the connection is/was active recently. * * @return True if the connection is active or * was active in the past period of time. */ public synchronized boolean isActiveRecently() { return Time.monotonicNow() - this.lastActiveTs <= ACTIVE_WINDOW_TIME; }
3.68
framework_Escalator_calculateMaxColWidth
/** * Iterates through all the cells in a column and returns the width of * the widest element in this RowContainer. * * @param index * the index of the column to inspect * @return the pixel width of the widest element in the indicated column */ public double calculateMaxColWidth(int index) { TableRowElement row = TableRowElement .as(root.getFirstChildElement()); double maxWidth = 0; while (row != null) { final TableCellElement cell = row.getCells().getItem(index); final boolean isVisible = !cell.getStyle().getDisplay() .equals(Display.NONE.getCssName()); if (isVisible) { maxWidth = Math.max(maxWidth, WidgetUtil .getRequiredWidthBoundingClientRectDouble(cell)); } row = TableRowElement.as(row.getNextSiblingElement()); } return maxWidth; }
3.68
AreaShop_FileManager_getConfig
/** * Get the config file (config.yml). * @return YamlConfiguration with the settings set by the user, with fallback to the default settings provided by AreaShop */ public YamlConfiguration getConfig() { return config; }
3.68
hbase_CostFunction_updateWeight
/** * Add the cost of this cost function to the weight of the candidate generator that is optimized * for this cost function. By default it is the RandomCandidateGenerator for a cost function. * Called once per init or after postAction. * @param weights the weights for every generator. */ public void updateWeight(double[] weights) { weights[StochasticLoadBalancer.GeneratorType.RANDOM.ordinal()] += cost(); }
3.68
querydsl_SQLExpressions_listagg
/** * LISTAGG orders data within each group specified in the ORDER BY clause and then concatenates * the values of the measure column. * * @param expr measure column * @param delimiter delimiter * @return listagg(expr, delimiter) */ public static WithinGroup<String> listagg(Expression<?> expr, String delimiter) { return new WithinGroup<String>(String.class, SQLOps.LISTAGG, expr, ConstantImpl.create(delimiter)); }
3.68
framework_OptionGroup_isHtmlContentAllowed
/** * Checks whether captions are interpreted as html or plain text. * * @return true if the captions are used as html, false if used as plain * text * @see #setHtmlContentAllowed(boolean) */ public boolean isHtmlContentAllowed() { return htmlContentAllowed; }
3.68
flink_SizeBasedWindowFunction_windowSizeAttribute
/** The field for the window size. */ default LocalReferenceExpression windowSizeAttribute() { return localRef("window_size", DataTypes.INT()); }
3.68
zxing_Result_getText
/** * @return raw text encoded by the barcode */ public String getText() { return text; }
3.68
hbase_BlockingRpcConnection_handleSaslConnectionFailure
/** * If multiple clients with the same principal try to connect to the same server at the same time, * the server assumes a replay attack is in progress. This is a feature of kerberos. In order to * work around this, what is done is that the client backs off randomly and tries to initiate the * connection again. The other problem is to do with ticket expiry. To handle that, a relogin is * attempted. * <p> * The retry logic is governed by the {@link SaslClientAuthenticationProvider#canRetry()} method. * Some providers have the ability to obtain new credentials and then re-attempt to authenticate * with HBase services. Other providers will continue to fail if they failed the first time -- for * those, we want to fail-fast. * </p> */ private void handleSaslConnectionFailure(final int currRetries, final int maxRetries, final Exception ex, final UserGroupInformation user) throws IOException, InterruptedException { closeSocket(); user.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws IOException, InterruptedException { // A provider which failed authentication, but doesn't have the ability to relogin with // some external system (e.g. username/password, the password either works or it doesn't) if (!provider.canRetry()) { LOG.warn("Exception encountered while connecting to the server " + remoteId.getAddress(), ex); if (ex instanceof RemoteException) { throw (RemoteException) ex; } if (ex instanceof SaslException) { String msg = "SASL authentication failed." + " The most likely cause is missing or invalid credentials."; throw new RuntimeException(msg, ex); } throw new IOException(ex); } // Other providers, like kerberos, could request a new ticket from a keytab. Let // them try again. if (currRetries < maxRetries) { LOG.debug("Exception encountered while connecting to the server " + remoteId.getAddress(), ex); // Invoke the provider to perform the relogin provider.relogin(); // Get rid of any old state on the SaslClient disposeSasl(); // have granularity of milliseconds // we are sleeping with the Connection lock held but since this // connection instance is being used for connecting to the server // in question, it is okay Thread.sleep(ThreadLocalRandom.current().nextInt(reloginMaxBackoff) + 1); return null; } else { String msg = "Failed to initiate connection for " + UserGroupInformation.getLoginUser().getUserName() + " to " + securityInfo.getServerPrincipal(); throw new IOException(msg, ex); } } }); }
3.68
morf_AbstractSelectStatement_where
/** * Specifies the where criteria. For use in code where the criteria are being generated dynamically. * The iterable can be empty but not null. * * @param criteria the criteria to filter the results by. They will be <i>AND</i>ed together. * @return a new select statement with the change applied. */ public T where(Iterable<Criterion> criteria) { return copyOnWriteOrMutate( b -> b.where(criteria), () -> { if (criteria == null) { throw new IllegalArgumentException("No criterion was given in the where clause"); } if (!Iterables.isEmpty(criteria)) { whereCriterion = new Criterion(Operator.AND, criteria); } } ); }
3.68
hudi_HoodieTimeline_isInClosedRange
/** * Return true if specified timestamp is in range [startTs, endTs]. */ static boolean isInClosedRange(String timestamp, String startTs, String endTs) { return HoodieTimeline.compareTimestamps(timestamp, GREATER_THAN_OR_EQUALS, startTs) && HoodieTimeline.compareTimestamps(timestamp, LESSER_THAN_OR_EQUALS, endTs); }
3.68
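Both endpoints are inclusive, and Hudi instant times are fixed-width strings that order lexicographically; a short sketch (the import path is assumed from Hudi's common module):

    import org.apache.hudi.common.table.timeline.HoodieTimeline;

    public class ClosedRangeExample {
        public static void main(String[] args) {
            String start = "20230101000000";
            String end = "20230131235959";
            System.out.println(HoodieTimeline.isInClosedRange("20230115120000", start, end)); // true
            System.out.println(HoodieTimeline.isInClosedRange("20230201000000", start, end)); // false
            System.out.println(HoodieTimeline.isInClosedRange(start, start, end)); // true: inclusive
        }
    }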
graphhopper_NavigateResponseConverter_putBannerInstructions
/** * Banner instructions are the turn instructions that are shown to the user in the top bar. * <p> * Between two instructions we can show multiple banner instructions, you can control when they pop up using distanceAlongGeometry. */ private static void putBannerInstructions(InstructionList instructions, double distance, int index, Locale locale, TranslationMap translationMap, ArrayNode bannerInstructions) { /* A BannerInstruction looks like this distanceAlongGeometry: 107, primary: { text: "Lichtensteinstraße", components: [ { text: "Lichtensteinstraße", type: "text", } ], type: "turn", modifier: "right", }, secondary: null, */ ObjectNode bannerInstruction = bannerInstructions.addObject(); //Show from the beginning bannerInstruction.put("distanceAlongGeometry", distance); ObjectNode primary = bannerInstruction.putObject("primary"); putSingleBannerInstruction(instructions.get(index + 1), locale, translationMap, primary); bannerInstruction.putNull("secondary"); if (instructions.size() > index + 2 && instructions.get(index + 2).getSign() != Instruction.REACHED_VIA) { // Sub shows the instruction after the current one ObjectNode sub = bannerInstruction.putObject("sub"); putSingleBannerInstruction(instructions.get(index + 2), locale, translationMap, sub); } }
3.68
hbase_ProcedureExecutor_getProcedure
// ========================================================================== // Executor query helpers // ========================================================================== public Procedure<TEnvironment> getProcedure(final long procId) { return procedures.get(procId); }
3.68
hmily_GsonUtils_toTreeMap
/** * Converts a JSON string to a tree map. * * @param json the json string * @return the tree map, sorted by key */ public ConcurrentSkipListMap<String, Object> toTreeMap(final String json) { return GSON_MAP.fromJson(json, new TypeToken<ConcurrentSkipListMap<String, Object>>() { }.getType()); }
3.68
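A usage sketch; obtaining the GsonUtils instance via getInstance() is an assumption about hmily's utility class, and Gson parses untyped JSON numbers as Double.

    import java.util.concurrent.ConcurrentSkipListMap;

    public class TreeMapExample {
        public static void main(String[] args) {
            String json = "{\"b\":2,\"a\":1,\"c\":3}";
            // getInstance() is assumed; adapt to however GsonUtils is obtained.
            ConcurrentSkipListMap<String, Object> map = GsonUtils.getInstance().toTreeMap(json);
            System.out.println(map); // keys come back sorted: {a=1.0, b=2.0, c=3.0}
        }
    }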
rocketmq-connect_ExpressionBuilder_appendLeadingQuote
/** * Always append to this builder's expression the leading quote character(s) defined by this * builder's {@link IdentifierRules}. * * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendLeadingQuote() { return appendLeadingQuote(QuoteMethod.ALWAYS); }
3.68
hadoop_PathOutputCommitterFactory_getCommitterFactory
/** * Get the committer factory for a configuration. * @param outputPath the job's output path. If null, it means that the * schema is unknown and a per-schema factory cannot be determined. * @param conf configuration * @return an instantiated committer factory */ public static PathOutputCommitterFactory getCommitterFactory( Path outputPath, Configuration conf) { // determine which key to look up the overall one or a schema-specific // key LOG.debug("Looking for committer factory for path {}", outputPath); String key = COMMITTER_FACTORY_CLASS; if (StringUtils.isEmpty(conf.getTrimmed(key)) && outputPath != null) { // there is no explicit factory and there's an output path // Get the scheme of the destination String scheme = outputPath.toUri().getScheme(); // and see if it has a key String schemeKey = String.format(COMMITTER_FACTORY_SCHEME_PATTERN, scheme); if (StringUtils.isNotEmpty(conf.getTrimmed(schemeKey))) { // it does, so use that key in the classname lookup LOG.info("Using schema-specific factory for {}", outputPath); key = schemeKey; } else { LOG.debug("No scheme-specific factory defined in {}", schemeKey); } } // create the factory. Before using Configuration.getClass, check // for an empty configuration value, as that raises ClassNotFoundException. Class<? extends PathOutputCommitterFactory> factory; String trimmedValue = conf.getTrimmed(key, ""); if (StringUtils.isEmpty(trimmedValue)) { // empty/null value, use default LOG.info("No output committer factory defined," + " defaulting to FileOutputCommitterFactory"); factory = FileOutputCommitterFactory.class; } else { // key is set, get the class factory = conf.getClass(key, FileOutputCommitterFactory.class, PathOutputCommitterFactory.class); LOG.info("Using OutputCommitter factory class {} from key {}", factory, key); } return ReflectionUtils.newInstance(factory, conf); }
3.68
querydsl_PathBuilder_getArray
/** * Create an ArrayPath instance for the given property and the given array type * * @param <A> array type * @param <E> element type * @param property property name * @param type property type * @return property path */ public <A, E> ArrayPath<A, E> getArray(String property, Class<A> type) { validate(property, Array.newInstance(type, 0).getClass()); return super.createArray(property, type); }
3.68
hadoop_SQLDelegationTokenSecretManager_updateDelegationKey
/** * Updates an existing DelegationKey in the SQL database. * @param key Updated DelegationKey. */ @Override protected void updateDelegationKey(DelegationKey key) throws IOException { try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(bos)) { key.write(dos); // Update delegation key in SQL database updateDelegationKey(key.getKeyId(), bos.toByteArray()); // Update delegation key in local cache super.updateDelegationKey(key); } catch (SQLException e) { throw new IOException("Failed to update delegation key in SQL secret manager", e); } }
3.68
flink_UserDefinedFunction_open
/** * Setup method for user-defined function. It can be used for initialization work. By default, * this method does nothing. */ public void open(FunctionContext context) throws Exception { // do nothing }
3.68
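A typical override, following the pattern from Flink's documentation: a ScalarFunction (a UserDefinedFunction subclass) does one-time setup in open() before any eval() call.

    import org.apache.flink.table.functions.FunctionContext;
    import org.apache.flink.table.functions.ScalarFunction;

    public class HashCodeFunction extends ScalarFunction {
        private transient int factor;

        @Override
        public void open(FunctionContext context) throws Exception {
            // Read a job parameter once, before the function processes any rows.
            factor = Integer.parseInt(context.getJobParameter("hashcode.factor", "12"));
        }

        public Integer eval(String s) {
            return s.hashCode() * factor;
        }
    }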
morf_AliasedField_withImmutableBuildersEnabled
/** * Allows tests to run with immutable building behaviour turned on. * * TODO remove when we remove the old mutable behaviour * * @param runnable The code to run. */ public static void withImmutableBuildersEnabled(Runnable runnable) { withImmutableBuilders(runnable, true); }
3.68
flink_CopyOnWriteSkipListStateMap_doWriteValue
/** * Write the meta and data for the value to the space where the value pointer points. * * @param valuePointer pointer to the space where the meta and data is written. * @param value data of the value. * @param version version of this value. * @param keyPointer pointer to the key. * @param nextValuePointer pointer to the next value. */ private void doWriteValue( long valuePointer, byte[] value, int version, long keyPointer, long nextValuePointer) { Node node = getNodeSegmentAndOffset(valuePointer); MemorySegment segment = node.nodeSegment; int offsetInSegment = node.nodeOffset; SkipListUtils.putValueVersion(segment, offsetInSegment, version); SkipListUtils.putKeyPointer(segment, offsetInSegment, keyPointer); SkipListUtils.putNextValuePointer(segment, offsetInSegment, nextValuePointer); SkipListUtils.putValueLen(segment, offsetInSegment, value == null ? 0 : value.length); if (value != null) { SkipListUtils.putValueData(segment, offsetInSegment, value); } }
3.68
dubbo_ExpiringMap_setExpirationInterval
/** * set expiration interval * * @param expirationInterval expiration interval (in seconds) */ public void setExpirationInterval(long expirationInterval) { this.expirationIntervalMillis = expirationInterval * 1000; }
3.68
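Note the unit conversion above: the argument is in seconds but stored in milliseconds. A minimal sketch (the no-arg constructor and Map usage are assumptions about dubbo's ExpiringMap):

    public class ExpiringMapExample {
        public static void main(String[] args) {
            ExpiringMap<String, String> sessions = new ExpiringMap<>();
            sessions.setExpirationInterval(30); // 30 s, stored internally as 30_000 ms
            sessions.put("session-id", "token");
        }
    }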
hadoop_Chunk_flushBuffer
/** * Flush the internal buffer, writing out any buffered bytes as a non-final chunk. * * @throws java.io.IOException if the chunk cannot be written */ private void flushBuffer() throws IOException { if (count > 0) { writeChunk(buf, 0, count, false); count = 0; } }
3.68
hbase_HbckReport_getOrphanRegionsOnFS
/** * Returns the regions that have a directory on the FileSystem but no region info in meta. */ public Map<String, Path> getOrphanRegionsOnFS() { return orphanRegionsOnFS; }
3.68
flink_Tuple12_equals
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple12)) { return false; } @SuppressWarnings("rawtypes") Tuple12 tuple = (Tuple12) o; if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false; } if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; } if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) { return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) { return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) { return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false; } if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) { return false; } if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) { return false; } if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) { return false; } if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) { return false; } return true; }
3.68
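The repeated null-guarded comparisons above are exactly what java.util.Objects.equals encapsulates. An illustrative two-field analogue (Flink's generated Tuple classes keep the expanded form):

    import java.util.Objects;

    final class Pair<A, B> {
        final A f0;
        final B f1;

        Pair(A f0, B f1) {
            this.f0 = f0;
            this.f1 = f1;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof Pair)) {
                return false;
            }
            Pair<?, ?> other = (Pair<?, ?>) o;
            // Objects.equals(a, b) == (a == null ? b == null : a.equals(b))
            return Objects.equals(f0, other.f0) && Objects.equals(f1, other.f1);
        }

        @Override
        public int hashCode() {
            return Objects.hash(f0, f1);
        }
    }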
flink_CoGroupOperator_createCoGroupOperator
/** * Intermediate step of a CoGroup transformation. * * <p>To continue the CoGroup transformation, provide a {@link * org.apache.flink.api.common.functions.RichCoGroupFunction} by calling {@link * org.apache.flink.api.java.operators.CoGroupOperator.CoGroupOperatorSets.CoGroupOperatorSetsPredicate.CoGroupOperatorWithoutFunction#with(org.apache.flink.api.common.functions.CoGroupFunction)}. */ private CoGroupOperatorWithoutFunction createCoGroupOperator(Keys<I2> keys2) { if (keys2 == null) { throw new NullPointerException(); } if (keys2.isEmpty()) { throw new InvalidProgramException("The co-group keys must not be empty."); } try { keys1.areCompatible(keys2); } catch (IncompatibleKeysException ike) { throw new InvalidProgramException( "The pair of co-group keys are not compatible with each other.", ike); } return new CoGroupOperatorWithoutFunction(keys2); }
3.68
hadoop_Server_getName
/** * Returns the name of the server. * * @return the server name. */ public String getName() { return name; }
3.68
flink_SegmentPartitionFileWriter_flush
/** This method is only called by the flushing thread. */ private void flush( TieredStoragePartitionId partitionId, int subpartitionId, int segmentId, List<Tuple2<Buffer, Integer>> buffersToFlush) { try { writeBuffers( partitionId, subpartitionId, segmentId, buffersToFlush, getTotalBytes(buffersToFlush)); buffersToFlush.forEach(bufferToFlush -> bufferToFlush.f0.recycleBuffer()); } catch (IOException exception) { ExceptionUtils.rethrow(exception); } }
3.68
flink_NFAState_setStateChanged
/** Set the changed bit checked via {@link #isStateChanged()} to {@code true}. */ public void setStateChanged() { this.stateChanged = true; }
3.68
streampipes_FlinkRuntime_getStreamSource
/** * This method takes the i-th input stream and creates a source for the flink graph. * Kafka, JMS and MQTT are supported as transport protocols. * * @param i index of the input stream * @return the source function for the stream, or null if the stream or its protocol cannot be resolved */ private SourceFunction<Map<String, Object>> getStreamSource(int i) { if (runtimeParameters.getModel().getInputStreams().size() - 1 >= i) { SpDataStream stream = runtimeParameters.getModel().getInputStreams().get(i); if (stream != null) { TransportProtocol protocol = stream.getEventGrounding().getTransportProtocol(); TransportFormat format = stream.getEventGrounding().getTransportFormats().get(0); SpDataFormatDefinition dataFormatDefinition = getDataFormatDefinition(format); if (protocol instanceof KafkaTransportProtocol) { return getKafkaConsumer((KafkaTransportProtocol) protocol, dataFormatDefinition); } else if (protocol instanceof JmsTransportProtocol) { return getJmsConsumer((JmsTransportProtocol) protocol, dataFormatDefinition); } else if (protocol instanceof MqttTransportProtocol) { return getMqttConsumer((MqttTransportProtocol) protocol, dataFormatDefinition); } else { return null; } } else { return null; } } else { return null; } }
3.68
hbase_FanOutOneBlockAsyncDFSOutputHelper_createOutput
/** * Create a {@link FanOutOneBlockAsyncDFSOutput}. The method may block, so do not call it * inside an {@link EventLoop}. */ public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, final StreamSlowMonitor monitor) throws IOException { return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() { @Override public FanOutOneBlockAsyncDFSOutput doCall(Path p) throws IOException, UnresolvedLinkException { return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); } @Override public FanOutOneBlockAsyncDFSOutput next(FileSystem fs, Path p) throws IOException { throw new UnsupportedOperationException(); } }.resolve(dfs, f); }
3.68
flink_RestartBackoffTimeStrategyFactoryLoader_createRestartBackoffTimeStrategyFactory
/** * Creates {@link RestartBackoffTimeStrategy.Factory} from the given configuration. * * <p>The strategy factory is decided in order as follows: * * <ol> * <li>Strategy set within job graph, i.e. {@link * RestartStrategies.RestartStrategyConfiguration}, unless the config is {@link * RestartStrategies.FallbackRestartStrategyConfiguration}. * <li>Strategy set in the cluster (server-side) config (flink-conf.yaml), if one is * specified there * <li>{@link * FixedDelayRestartBackoffTimeStrategy.FixedDelayRestartBackoffTimeStrategyFactory} if * checkpointing is enabled. Otherwise {@link * NoRestartBackoffTimeStrategy.NoRestartBackoffTimeStrategyFactory} * </ol> * * @param jobRestartStrategyConfiguration restart configuration given within the job graph * @param clusterConfiguration cluster (server-side) configuration * @param isCheckpointingEnabled if checkpointing is enabled for the job * @return the new restart strategy factory */ public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory( final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration, final Configuration clusterConfiguration, final boolean isCheckpointingEnabled) { checkNotNull(jobRestartStrategyConfiguration); checkNotNull(clusterConfiguration); return getJobRestartStrategyFactory(jobRestartStrategyConfiguration) .orElse( getClusterRestartStrategyFactory(clusterConfiguration) .orElse(getDefaultRestartStrategyFactory(isCheckpointingEnabled))); }
3.68
framework_ClientRpcWriter_write
/** * Writes a JSON object containing all pending client RPC invocations in the * given UI. * * @param ui * The {@link UI} whose RPC calls to write. * @param writer * The {@link Writer} used to write the JSON. * @throws IOException * If the serialization fails. */ public void write(UI ui, Writer writer) throws IOException { Collection<ClientMethodInvocation> pendingInvocations = collectPendingRpcCalls( ui.getConnectorTracker().getDirtyVisibleConnectors()); JsonArray rpcCalls = Json.createArray(); for (ClientMethodInvocation invocation : pendingInvocations) { // add invocation to rpcCalls try { JsonArray invocationJson = Json.createArray(); invocationJson.set(0, invocation.getConnector().getConnectorId()); invocationJson.set(1, invocation.getInterfaceName()); invocationJson.set(2, invocation.getMethodName()); JsonArray paramJson = Json.createArray(); for (int i = 0; i < invocation .getParameterTypes().length; ++i) { Type parameterType = invocation.getParameterTypes()[i]; JsonValue referenceParameter = null; // TODO Use default values for RPC parameter types // if (!JsonCodec.isInternalType(parameterType)) { // try { // referenceParameter = parameterType.newInstance(); // } catch (Exception e) { // logger.log(Level.WARNING, // "Error creating reference object for parameter of type " // + parameterType.getName()); // } // } EncodeResult encodeResult = JsonCodec.encode( invocation.getParameters()[i], referenceParameter, parameterType, ui.getConnectorTracker()); paramJson.set(i, encodeResult.getEncodedValue()); } invocationJson.set(3, paramJson); rpcCalls.set(rpcCalls.length(), invocationJson); } catch (JsonException e) { throw new PaintException( "Failed to serialize RPC method call parameters for connector " + invocation.getConnector().getConnectorId() + " method " + invocation.getInterfaceName() + "." + invocation.getMethodName() + ": " + e.getMessage(), e); } } writer.write(JsonUtil.stringify(rpcCalls)); }
3.68
flink_CompensatedSum_delta
/** The correction term. */ public double delta() { return delta; }
3.68
hbase_ProcedureExecutor_unregisterNonceIfProcedureWasNotSubmitted
/** * Remove the NonceKey if the procedure was not submitted to the executor. * @param nonceKey A unique identifier for this operation from the client or process. */ public void unregisterNonceIfProcedureWasNotSubmitted(final NonceKey nonceKey) { if (nonceKey == null) { return; } final Long procId = nonceKeysToProcIdsMap.get(nonceKey); if (procId == null) { return; } // if the procedure was not submitted, remove the nonce if (!(procedures.containsKey(procId) || completed.containsKey(procId))) { nonceKeysToProcIdsMap.remove(nonceKey); } }
3.68
framework_StreamResource_getCacheTime
/** * Gets the length of cache expiration time. This gives the adapter the * possibility to cache streams sent to the client. The caching may be done in the * adapter or at the client if the client supports caching. Default is * <code>DownloadStream.DEFAULT_CACHETIME</code>. * * @return Cache time in milliseconds. */ public long getCacheTime() { return cacheTime; }
3.68
dubbo_DubboDefaultPropertiesEnvironmentPostProcessor_setAllowBeanDefinitionOverriding
/** * Set {@link #ALLOW_BEAN_DEFINITION_OVERRIDING_PROPERTY "spring.main.allow-bean-definition-overriding"} to be * <code>true</code> as default. * * @param defaultProperties the default {@link Properties properties} * @see #ALLOW_BEAN_DEFINITION_OVERRIDING_PROPERTY * @since 2.7.1 */ private void setAllowBeanDefinitionOverriding(Map<String, Object> defaultProperties) { defaultProperties.put(ALLOW_BEAN_DEFINITION_OVERRIDING_PROPERTY, Boolean.TRUE.toString()); }
3.68
framework_VDateField_sendBufferedValues
/** * Sends the {@link #bufferedDateString} and {@link #bufferedResolutions} to * the server, and clears their values. * * @since 8.2 */ public void sendBufferedValues() { rpc.update(bufferedDateString, bufferedResolutions.entrySet().stream().collect( Collectors.toMap(entry -> entry.getKey().name(), entry -> entry.getValue()))); bufferedDateString = null; bufferedResolutions.clear(); }
3.68
hbase_CleanerChore_checkAndDeleteFiles
/** * Run the given files through each of the cleaners to see if they should be deleted, deleting them if * necessary. * @param files List of FileStatus for the files to check (and possibly delete) * @return true iff successfully deleted all files */ private boolean checkAndDeleteFiles(List<FileStatus> files) { if (files == null) { return true; } // first check to see if the path is valid List<FileStatus> validFiles = Lists.newArrayListWithCapacity(files.size()); List<FileStatus> invalidFiles = Lists.newArrayList(); for (FileStatus file : files) { if (validate(file.getPath())) { validFiles.add(file); } else { LOG.warn("Found a wrongly formatted file: " + file.getPath() + " - will delete it."); invalidFiles.add(file); } } Iterable<FileStatus> deletableValidFiles = validFiles; // check each of the cleaners for the valid files for (T cleaner : cleanersChain) { if (cleaner.isStopped() || this.getStopper().isStopped()) { LOG.warn("A file cleaner " + this.getName() + " is stopped, won't delete any more files in: " + this.oldFileDir); return false; } Iterable<FileStatus> filteredFiles = cleaner.getDeletableFiles(deletableValidFiles); // trace which cleaner is holding on to each file if (LOG.isTraceEnabled()) { ImmutableSet<FileStatus> filteredFileSet = ImmutableSet.copyOf(filteredFiles); for (FileStatus file : deletableValidFiles) { if (!filteredFileSet.contains(file)) { LOG.trace(file.getPath() + " is not deletable according to: " + cleaner); } } } deletableValidFiles = filteredFiles; } Iterable<FileStatus> filesToDelete = Iterables.concat(invalidFiles, deletableValidFiles); return deleteFiles(filesToDelete) == files.size(); }
3.68
framework_DesignContext_getComponentByLocalId
/** * Returns a component having the specified local id. If no component is * found, returns null. * * @param localId * The local id of the component * @return a component whose local id equals localId */ public Component getComponentByLocalId(String localId) { return localIdToComponent.get(localId); }
3.68
framework_AbstractInMemoryContainer_getVisibleItemIds
/** * Returns the internal list of visible item identifiers after filtering. * * For internal use only. */ protected List<ITEMIDTYPE> getVisibleItemIds() { if (isFiltered()) { return getFilteredItemIds(); } else { return getAllItemIds(); } }
3.68
framework_InMemoryDataProvider_filteringBySubstring
/** * Wraps this data provider to create a new data provider that is filtered * by a string by checking whether the lower case representation of the * filter value provided in the query is a substring of the lower case * representation of an item property value. Conversion to lower case is * done using the locale of the {@link UI#getCurrent() current UI} if * available, or otherwise {@link Locale#getDefault() the default locale}. * The filter never passes if the item property value is <code>null</code>. * * @param valueProvider * a value provider that gets the string property value, not * <code>null</code> * @return a data provider that filters accordingly, not <code>null</code> */ public default DataProvider<T, String> filteringBySubstring( ValueProvider<T, String> valueProvider) { return InMemoryDataProviderHelpers.filteringByCaseInsensitiveString( this, valueProvider, String::contains, InMemoryDataProviderHelpers.CURRENT_LOCALE_SUPPLIER); }
3.68
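A short usage sketch, assuming Vaadin 8's DataProvider factory methods; a query filter of "li" would then match "Alice" case-insensitively.

    import com.vaadin.data.provider.DataProvider;
    import com.vaadin.data.provider.ListDataProvider;

    public class SubstringFilterExample {
        void demo() {
            ListDataProvider<String> names = DataProvider.ofItems("Alice", "Bob", "Carol");
            // The resulting provider filters by case-insensitive substring match
            // on the value the ValueProvider extracts (here, the item itself).
            DataProvider<String, String> filtered = names.filteringBySubstring(name -> name);
        }
    }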
hbase_ReplicationPeerConfigUtil_parseTableCFs
/** * Parse bytes into TableCFs. It is used for backward compatibility. Old format bytes have no * PB_MAGIC Header */ public static ReplicationProtos.TableCF[] parseTableCFs(byte[] bytes) throws IOException { if (bytes == null) { return null; } return ReplicationPeerConfigUtil.convert(Bytes.toString(bytes)); }
3.68
MagicPlugin_CastPermissionManager_getPersonalCastPermission
/** * This will perform cast permission checks for a specific location. * This will override the region permission, and is generally for use inside of a player's * personal protected area, when it may be contained within a larger globally protected area. * * @return false to deny cast permission, null to not care. Returning true means the cast will be allowed, * including breaking/building blocks, even if it otherwise would not be allowed. */ @Nullable default Boolean getPersonalCastPermission(Player player, SpellTemplate spell, Location location) { return null; }
3.68
hadoop_Summarizer_finalize
/** * This finalizes the summarizer. */ @SuppressWarnings("unchecked") void finalize(JobFactory factory, String path, long size, UserResolver resolver, DataStatistics stats, Configuration conf) throws IOException { executionSummarizer.finalize(factory, path, size, resolver, stats, conf); }
3.68
hadoop_SchedulerHealth_getLastReleaseDetails
/** * Get the details of last release. * * @return last release details */ public DetailedInformation getLastReleaseDetails() { return getDetailedInformation(Operation.RELEASE); }
3.68
hbase_QuotaTableUtil_getTableSnapshots
/** * Returns a multimap for all existing table snapshot entries. * @param conn connection to re-use */ public static Multimap<TableName, String> getTableSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { Multimap<TableName, String> snapshots = HashMultimap.create(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { Cell c = cs.current(); final String snapshot = extractSnapshotNameFromSizeCell(c); snapshots.put(getTableFromRowKey(r.getRow()), snapshot); } } return snapshots; } }
3.68
flink_IOUtils_cleanup
/** * Close the AutoCloseable objects and <b>ignore</b> any {@link Exception} or null pointers. * Must only be used for cleanup in exception handlers. * * @param log the log to record problems to at debug level. Can be <code>null</code>. * @param closeables the objects to close */ public static void cleanup(final Logger log, final AutoCloseable... closeables) { for (AutoCloseable c : closeables) { if (c != null) { try { c.close(); } catch (Exception e) { if (log != null && log.isDebugEnabled()) { log.debug("Exception in closing " + c, e); } } } } }
3.68
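Per the Javadoc's warning, the intended call pattern is cleanup in exception handlers only, with a normal close() on the happy path so close failures are not silently swallowed. A sketch (the org.apache.flink.util package is assumed):

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import org.apache.flink.util.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CleanupExample {
        private static final Logger LOG = LoggerFactory.getLogger(CleanupExample.class);

        public static void copy(String from, String to) throws Exception {
            FileInputStream in = null;
            FileOutputStream out = null;
            try {
                in = new FileInputStream(from);
                out = new FileOutputStream(to);
                out.write(in.readAllBytes());
                in.close();  // happy path: close normally so failures surface
                out.close();
            } catch (Exception e) {
                // error path: nulls and close() exceptions are ignored (debug-logged)
                IOUtils.cleanup(LOG, in, out);
                throw e;
            }
        }
    }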
hbase_HRegionFileSystem_getRegionInfo
/** Returns the {@link RegionInfo} that describe this on-disk region view */ public RegionInfo getRegionInfo() { return this.regionInfo; }
3.68
hudi_UTF8StringBuilder_grow
// Grows the buffer by at least `neededSize` private void grow(int neededSize) { if (neededSize > ARRAY_MAX - totalSize()) { throw new UnsupportedOperationException( "Cannot grow internal buffer by size " + neededSize + " because the size after growing " + "exceeds size limitation " + ARRAY_MAX); } final int length = totalSize() + neededSize; if (buffer.length < length) { int newLength = length < ARRAY_MAX / 2 ? length * 2 : ARRAY_MAX; final byte[] tmp = new byte[newLength]; Platform.copyMemory( buffer, Platform.BYTE_ARRAY_OFFSET, tmp, Platform.BYTE_ARRAY_OFFSET, totalSize()); buffer = tmp; } }
3.68
morf_ChangeColumn_isApplied
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources) */ @Override public boolean isApplied(Schema schema, ConnectionResources database) { if (!schema.tableExists(tableName)) { return false; } Table table = schema.getTable(tableName); SchemaHomology homology = new SchemaHomology(); for (Column column : table.columns()) { if (homology.columnsMatch(column, toColumn)) { return true; } } return false; }
3.68
framework_AbstractSelect_setItemIconPropertyId
/** * Sets the item icon property. * * <p> * If the property id is set to a valid value, each item is given an icon * got from the given property of the items. The type of the property must * be assignable to Resource. * </p> * * <p> * Note : The icons set with <code>setItemIcon</code> function override the * icons from the property. * </p> * * <p> * Setting the property id to null disables this feature. The id is null by * default * </p> * . * * @param propertyId * the id of the property that specifies icons for items or null * @throws IllegalArgumentException * If the propertyId is not in the container or is not of a * valid type */ public void setItemIconPropertyId(Object propertyId) throws IllegalArgumentException { if (propertyId == null) { itemIconPropertyId = null; } else if (!getContainerPropertyIds().contains(propertyId)) { throw new IllegalArgumentException( "Property id not found in the container"); } else if (Resource.class.isAssignableFrom(getType(propertyId))) { itemIconPropertyId = propertyId; } else { throw new IllegalArgumentException( "Property type must be assignable to Resource"); } markAsDirty(); }
3.68
framework_VAbstractSplitPanel_convertToPositionUnits
/** * Converts given string to the same units as the split position is. * * @param pos * position to be converted * @return converted position string */ private String convertToPositionUnits(String pos) { if (position.indexOf("%") != -1 && pos.indexOf("%") == -1) { // position is in percentage, pos in pixels pos = convertToPercentage(pos) + "%"; } else if (position.indexOf("px") > 0 && pos.indexOf("px") == -1) { // position is in pixels and pos in percentage pos = convertToPixels(pos) + "px"; } return pos; }
3.68
hbase_Encryption_getConfiguredHashAlgorithm
/** * Returns the Hash Algorithm defined in the crypto configuration. */ public static String getConfiguredHashAlgorithm(Configuration conf) { return conf.getTrimmed(CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, CRYPTO_KEY_HASH_ALGORITHM_CONF_DEFAULT); }
3.68
hadoop_AbfsConfiguration_getTracingHeaderFormat
/** * Enum config that allows the user to pick the format of the x-ms-client-request-id header. * @return tracingContextFormat config if valid, else default ALL_ID_FORMAT */ public TracingHeaderFormat getTracingHeaderFormat() { return getEnum(FS_AZURE_TRACINGHEADER_FORMAT, TracingHeaderFormat.ALL_ID_FORMAT); }
3.68
hadoop_ReplicaUnderConstruction_setState
/** * Set replica state. */ void setState(HdfsServerConstants.ReplicaState s) { state = s; }
3.68
flink_Configuration_contains
/** * Checks whether there is an entry for the given config option. * * @param configOption The configuration option * @return <tt>true</tt> if a valid (current or deprecated) key of the config option is stored, * <tt>false</tt> otherwise */ @PublicEvolving public boolean contains(ConfigOption<?> configOption) { synchronized (this.confData) { final BiFunction<String, Boolean, Optional<Boolean>> applier = (key, canBePrefixMap) -> { if (canBePrefixMap && containsPrefixMap(this.confData, key) || this.confData.containsKey(key)) { return Optional.of(true); } return Optional.empty(); }; return applyWithOption(configOption, applier).orElse(false); } }
3.68
framework_Header_setDefault
/** * Sets whether this row is the default header row. * * @param defaultHeader * {@code true} to set to default, {@code false} otherwise. */ protected void setDefault(boolean defaultHeader) { getRowState().defaultHeader = defaultHeader; }
3.68
hbase_Import_filterKv
/** * Attempt to filter out the keyvalue * @param c {@link Cell} on which to apply the filter * @return <tt>null</tt> if the key should not be written, otherwise returns the original * {@link Cell} */ public static Cell filterKv(Filter filter, Cell c) throws IOException { // apply the filter and skip this kv if the filter doesn't apply if (filter != null) { Filter.ReturnCode code = filter.filterCell(c); if (LOG.isTraceEnabled()) { LOG.trace("Filter returned:" + code + " for the cell:" + c); } // if it's not an accept type, then skip this kv if ( !(code.equals(Filter.ReturnCode.INCLUDE) || code.equals(Filter.ReturnCode.INCLUDE_AND_NEXT_COL)) ) { return null; } } return c; }
3.68
flink_InstantiationUtil_clone
/** * Clones the given serializable object using Java serialization, using the given classloader to * resolve the cloned classes. * * @param obj Object to clone * @param classLoader The classloader to resolve the classes during deserialization. * @param <T> Type of the object to clone * @return Cloned object * @throws IOException Thrown if the serialization or deserialization process fails. * @throws ClassNotFoundException Thrown if any of the classes referenced by the object cannot * be resolved during deserialization. */ public static <T extends Serializable> T clone(T obj, ClassLoader classLoader) throws IOException, ClassNotFoundException { if (obj == null) { return null; } else { final byte[] serializedObject = serializeObject(obj); return deserializeObject(serializedObject, classLoader); } }
3.68
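A usage sketch: a deep copy via serialize-then-deserialize, with classes resolved against a chosen classloader. The org.apache.flink.util package is assumed.

    import java.util.ArrayList;
    import org.apache.flink.util.InstantiationUtil;

    public class CloneExample {
        public static void main(String[] args) throws Exception {
            ArrayList<String> original = new ArrayList<>();
            original.add("a");

            ArrayList<String> copy =
                    InstantiationUtil.clone(original, CloneExample.class.getClassLoader());

            copy.add("b");
            System.out.println(original); // [a]
            System.out.println(copy);     // [a, b]
        }
    }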
hbase_CatalogFamilyFormat_hasMergeRegions
/** * Returns True if any merge regions present in <code>cells</code>; i.e. the column in * <code>cell</code> matches the regex 'info:merge.*'. */ public static boolean hasMergeRegions(Cell[] cells) { for (Cell cell : cells) { if (isMergeQualifierPrefix(cell)) { return true; } } return false; }
3.68
flink_RocksDBIncrementalCheckpointUtils_clipDBWithKeyGroupRange
/** * The method to clip the db instance according to the target key group range using the {@link * RocksDB#delete(ColumnFamilyHandle, byte[])}. * * @param db the RocksDB instance to be clipped. * @param columnFamilyHandles the column families in the db instance. * @param targetKeyGroupRange the target key group range. * @param currentKeyGroupRange the key group range of the db instance. * @param keyGroupPrefixBytes Number of bytes required to prefix the key groups. */ public static void clipDBWithKeyGroupRange( @Nonnull RocksDB db, @Nonnull List<ColumnFamilyHandle> columnFamilyHandles, @Nonnull KeyGroupRange targetKeyGroupRange, @Nonnull KeyGroupRange currentKeyGroupRange, @Nonnegative int keyGroupPrefixBytes) throws RocksDBException { final byte[] beginKeyGroupBytes = new byte[keyGroupPrefixBytes]; final byte[] endKeyGroupBytes = new byte[keyGroupPrefixBytes]; if (currentKeyGroupRange.getStartKeyGroup() < targetKeyGroupRange.getStartKeyGroup()) { CompositeKeySerializationUtils.serializeKeyGroup( currentKeyGroupRange.getStartKeyGroup(), beginKeyGroupBytes); CompositeKeySerializationUtils.serializeKeyGroup( targetKeyGroupRange.getStartKeyGroup(), endKeyGroupBytes); deleteRange(db, columnFamilyHandles, beginKeyGroupBytes, endKeyGroupBytes); } if (currentKeyGroupRange.getEndKeyGroup() > targetKeyGroupRange.getEndKeyGroup()) { CompositeKeySerializationUtils.serializeKeyGroup( targetKeyGroupRange.getEndKeyGroup() + 1, beginKeyGroupBytes); CompositeKeySerializationUtils.serializeKeyGroup( currentKeyGroupRange.getEndKeyGroup() + 1, endKeyGroupBytes); deleteRange(db, columnFamilyHandles, beginKeyGroupBytes, endKeyGroupBytes); } }
3.68
framework_BasicEventProvider_addEventSetChangeListener
/* * (non-Javadoc) * * @see com.vaadin.addon.calendar.ui.CalendarComponentEvents. * EventSetChangeNotifier #addListener * (com.vaadin.addon.calendar.ui.CalendarComponentEvents. * EventSetChangeListener ) */ @Override public void addEventSetChangeListener(EventSetChangeListener listener) { listeners.add(listener); }
3.68
flink_PythonConfig_getLocalTimeZone
/** * Returns the current session time zone id. It is used when converting to/from {@code TIMESTAMP * WITH LOCAL TIME ZONE}. * * @see org.apache.flink.table.types.logical.LocalZonedTimestampType */ private static ZoneId getLocalTimeZone(ReadableConfig config) { String zone = config.get(TableConfigOptions.LOCAL_TIME_ZONE); return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone) ? ZoneId.systemDefault() : ZoneId.of(zone); }
3.68
hbase_RequestConverter_buildSetSnapshotCleanupRequest
/** * Creates SetSnapshotCleanupRequest for turning on/off auto snapshot cleanup * @param enabled Set to <code>true</code> to enable, <code>false</code> to disable. * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed, * if outstanding. * @return a SetSnapshotCleanupRequest */ public static SetSnapshotCleanupRequest buildSetSnapshotCleanupRequest(final boolean enabled, final boolean synchronous) { return SetSnapshotCleanupRequest.newBuilder().setEnabled(enabled).setSynchronous(synchronous) .build(); }
3.68
hudi_HoodieTableMetaClient_initTableAndGetMetaClient
/** * Helper method to initialize a given path as a hoodie table with configs passed in as Properties. * * @return Instance of HoodieTableMetaClient */ public static HoodieTableMetaClient initTableAndGetMetaClient(Configuration hadoopConf, String basePath, Properties props) throws IOException { initTableMetaClient(hadoopConf, basePath, props); // We should not use fs.getConf as this might be different from the original configuration // used to create the fs in unit tests HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(basePath) .setMetaserverConfig(props) .build(); LOG.info("Finished initializing Table of type " + metaClient.getTableConfig().getTableType() + " from " + basePath); return metaClient; }
3.68
pulsar_TripleLongPriorityQueue_pop
/** * Removes the first item from the queue. */ public void pop() { checkArgument(tuplesCount != 0); swap(0, tuplesCount - 1); tuplesCount--; siftDown(0); shrinkCapacity(); }
3.68
flink_PojoSerializerSnapshot_constructReconfiguredPojoSerializer
/** * Creates a reconfigured version of the {@link PojoSerializer}. * * @param originalNewPojoSerializer the original new {@link PojoSerializer} to create a * reconfigured version of. * @param fieldSerializerCompatibility compatibility of preexisting fields' serializers. * @param registeredSerializerSnapshots snapshot of previous registered subclasses' serializers. * @param preExistingRegistrationsCompatibility compatibility of preexisting subclasses' * serializers. * @param nonRegisteredSubclassSerializerSnapshots snapshot of previous non-registered * subclasses' serializers. * @return a reconfigured version of the original new {@link PojoSerializer}. */ private static <T> PojoSerializer<T> constructReconfiguredPojoSerializer( PojoSerializer<T> originalNewPojoSerializer, IntermediateCompatibilityResult<T> fieldSerializerCompatibility, LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> registeredSerializerSnapshots, IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility, LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots) { @SuppressWarnings("unchecked") final TypeSerializer<Object>[] reconfiguredFieldSerializers = constructReconfiguredFieldSerializers(fieldSerializerCompatibility); Tuple2<LinkedHashMap<Class<?>, Integer>, TypeSerializer<Object>[]> reconfiguredSubclassRegistry = constructReconfiguredSubclassRegistry( originalNewPojoSerializer.getBundledSubclassSerializerRegistry(), registeredSerializerSnapshots, preExistingRegistrationsCompatibility); return new PojoSerializer<>( originalNewPojoSerializer.getPojoClass(), originalNewPojoSerializer.getFields(), reconfiguredFieldSerializers, reconfiguredSubclassRegistry.f0, reconfiguredSubclassRegistry.f1, restoreSerializers(nonRegisteredSubclassSerializerSnapshots.unwrapOptionals()), originalNewPojoSerializer.getExecutionConfig()); }
3.68
flink_StateDescriptor_isSerializerInitialized
/** * Checks whether the serializer has been initialized. Serializer initialization is lazy, to * allow parametrization of serializers with an {@link ExecutionConfig} via {@link * #initializeSerializerUnlessSet(ExecutionConfig)}. * * @return True if the serializers have been initialized, false otherwise. */ public boolean isSerializerInitialized() { return serializerAtomicReference.get() != null; }
3.68
hbase_QuotaTableUtil_parseSnapshotSize
/** * Parses the snapshot size from the given Cell's value. */ static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { ByteString bs = UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getQuotaUsage(); }
3.68
flink_AsyncCheckpointRunnable_cleanup
/** @return discarded full/incremental size (if available). */ private Tuple2<Long, Long> cleanup() throws Exception { LOG.debug( "Cleanup AsyncCheckpointRunnable for checkpoint {} of {}.", checkpointMetaData.getCheckpointId(), taskName); Exception exception = null; // clean up ongoing operator snapshot results and non partitioned state handles long stateSize = 0, checkpointedSize = 0; for (OperatorSnapshotFutures operatorSnapshotResult : operatorSnapshotsInProgress.values()) { if (operatorSnapshotResult != null) { try { Tuple2<Long, Long> tuple2 = operatorSnapshotResult.cancel(); stateSize += tuple2.f0; checkpointedSize += tuple2.f1; } catch (Exception cancelException) { exception = ExceptionUtils.firstOrSuppressed(cancelException, exception); } } } if (null != exception) { throw exception; } return Tuple2.of(stateSize, checkpointedSize); }
3.68
pulsar_ProxyExtensionsUtils_load
/** * Load the extension according to the handler definition. * * @param metadata the extension definition. * @param narExtractionDirectory the directory to extract the NAR archive to. * @return the extension instance wrapped together with its class loader. */ static ProxyExtensionWithClassLoader load(ProxyExtensionMetadata metadata, String narExtractionDirectory) throws IOException { final File narFile = metadata.getArchivePath().toAbsolutePath().toFile(); NarClassLoader ncl = NarClassLoaderBuilder.builder() .narFile(narFile) .parentClassLoader(ProxyExtension.class.getClassLoader()) .extractionDirectory(narExtractionDirectory) .build(); ProxyExtensionDefinition phDef = getProxyExtensionDefinition(ncl); if (StringUtils.isBlank(phDef.getExtensionClass())) { throw new IOException("extension `" + phDef.getName() + "` does NOT provide an" + " extension implementation"); } try { Class extensionClass = ncl.loadClass(phDef.getExtensionClass()); Object extension = extensionClass.newInstance(); if (!(extension instanceof ProxyExtension)) { throw new IOException("Class " + phDef.getExtensionClass() + " does not implement extension interface"); } ProxyExtension ph = (ProxyExtension) extension; return new ProxyExtensionWithClassLoader(ph, ncl); } catch (Throwable t) { rethrowIOException(t); return null; } }
3.68
flink_AbstractKeyedStateBackend_getCurrentKeyGroupIndex
/** @see KeyedStateBackend */ public int getCurrentKeyGroupIndex() { return this.keyContext.getCurrentKeyGroupIndex(); }
3.68
flink_CheckpointFailureReason_isPreFlight
/** * @return true if this value indicates a failure reason happening before a checkpoint is passed * to a job's tasks. */ public boolean isPreFlight() { return preFlight; }
3.68
framework_VCalendar_getScrollListener
/** * Get the listener that is notified when the calendar widget is scrolled. * * @return the scroll listener */ public ScrollListener getScrollListener() { return scrollListener; }
3.68
flink_KeyedStream_between
/** * Specifies the time boundaries over which the join operation works, so that * * <pre> * leftElement.timestamp + lowerBound <= rightElement.timestamp <= leftElement.timestamp + upperBound * </pre> * * <p>By default both the lower and the upper bound are inclusive. This can be configured * with {@link IntervalJoined#lowerBoundExclusive()} and {@link * IntervalJoined#upperBoundExclusive()} * * @param lowerBound The lower bound. Needs to be smaller than or equal to the upperBound * @param upperBound The upper bound. Needs to be bigger than or equal to the lowerBound */ @PublicEvolving public IntervalJoined<T1, T2, KEY> between(Time lowerBound, Time upperBound) { if (timeBehaviour != TimeBehaviour.EventTime) { throw new UnsupportedTimeCharacteristicException( "Time-bounded stream joins are only supported in event time"); } checkNotNull(lowerBound, "A lower bound needs to be provided for a time-bounded join"); checkNotNull(upperBound, "An upper bound needs to be provided for a time-bounded join"); return new IntervalJoined<>( streamOne, streamTwo, lowerBound.toMilliseconds(), upperBound.toMilliseconds(), true, true); }
3.68
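A sketch of the surrounding interval-join API: both inputs keyed the same way, event time enabled, and a ProcessJoinFunction consuming each matched pair. Stream construction is omitted; only the between() call itself is taken from the snippet.

    import org.apache.flink.streaming.api.datastream.KeyedStream;
    import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
    import org.apache.flink.streaming.api.windowing.time.Time;
    import org.apache.flink.util.Collector;

    public class IntervalJoinSketch {
        // Pairs l with every r whose timestamp lies in [l.ts - 5s, l.ts + 10s].
        static void wire(KeyedStream<Long, Long> left, KeyedStream<Long, Long> right) {
            left.intervalJoin(right)
                .between(Time.seconds(-5), Time.seconds(10))
                .process(new ProcessJoinFunction<Long, Long, String>() {
                    @Override
                    public void processElement(Long l, Long r, Context ctx,
                                               Collector<String> out) {
                        out.collect(l + ":" + r);
                    }
                });
        }
    }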
morf_SchemaValidator_validateName
/** * Validates the basic naming rules for a database object (currently a table or view). */ private void validateName(String tableOrViewName) { if (!isEntityNameLengthValid(tableOrViewName)) { validationFailures.add("Name of table or view [" + tableOrViewName + "] is not allowed - it is over " + MAX_LENGTH + " characters long"); } if (isSQLReservedWord(tableOrViewName)) { validationFailures.add("Name of table or view [" + tableOrViewName + "] is not allowed - it is an SQL reserved word"); } if (!isNameConventional(tableOrViewName)) { validationFailures.add("Name of table or view [" + tableOrViewName + "] is not allowed - it must match " + validNamePattern.toString()); } }
3.68
flink_ProducerMergedPartitionFileIndex_getSize
/** Get the total size in bytes of this region, including the fields and the buffers. */ @Override public int getSize() { return REGION_SIZE + numBuffers; }
3.68
pulsar_TopicMessageIdImpl_getTopicPartitionName
/** * Get the topic name which contains partition part for this message. * @return the topic name which contains Partition part */ @Deprecated public String getTopicPartitionName() { return getOwnerTopic(); }
3.68
AreaShop_GeneralRegion_getBooleanSetting
/** * Get a boolean setting for this region, defined as follows * - If the region has the setting in its own file (/regions/regionName.yml), use that * - If the region has groups, use the setting defined by the most important group, if any * - Otherwise fallback to the default.yml file setting * @param path The path to get the setting of * @return The value of the setting (strings are handled as booleans) */ public boolean getBooleanSetting(String path) { if(config.isSet(path)) { if(config.isString(path)) { return config.getString(path).equalsIgnoreCase("true"); } return config.getBoolean(path); } boolean result = false; int priority = Integer.MIN_VALUE; boolean found = false; for(RegionGroup group : plugin.getFileManager().getGroups()) { if(group.isMember(this) && group.getSettings().isSet(path) && group.getPriority() > priority) { if(group.getSettings().isString(path)) { result = group.getSettings().getString(path).equalsIgnoreCase("true"); } else { result = group.getSettings().getBoolean(path); } priority = group.getPriority(); found = true; } } if(found) { return result; } if(this.getFileManager().getRegionSettings().isString(path)) { return this.getFileManager().getRegionSettings().getString(path).equalsIgnoreCase("true"); } if(this.getFileManager().getRegionSettings().isSet(path)) { return this.getFileManager().getRegionSettings().getBoolean(path); } else { return this.getFileManager().getFallbackRegionSettings().getBoolean(path); } }
3.68
morf_ConnectionResourcesBean_getPort
/** * @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#getPort() */ @Override public int getPort() { return port; }
3.68