Columns: name (string, length 12–178), code_snippet (string, length 8–36.5k), score (float64, range 3.26–3.68)
hadoop_SchedulerHealth_getLastSchedulerRunTime
/**
 * Get the timestamp of the latest scheduler operation.
 *
 * @return the scheduler's latest timestamp
 */
public long getLastSchedulerRunTime() {
  return lastSchedulerRunTime;
}
3.68
morf_OracleDialect_addIndexStatements
/**
 * @see org.alfasoftware.morf.jdbc.SqlDialect#addIndexStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Index)
 */
@Override
public Collection<String> addIndexStatements(Table table, Index index) {
  return ImmutableList.of(
    // when adding indexes to existing tables, use PARALLEL NOLOGGING to efficiently build the index
    Iterables.getOnlyElement(indexDeploymentStatements(table, index)) + " PARALLEL NOLOGGING",
    indexPostDeploymentStatements(index)
  );
}
3.68
morf_SqlServerDialect_changePrimaryKeyColumns
/**
 * @see org.alfasoftware.morf.jdbc.SqlDialect#changePrimaryKeyColumns(org.alfasoftware.morf.metadata.Table, java.util.List, java.util.List)
 */
@Override
public Collection<String> changePrimaryKeyColumns(Table table, List<String> oldPrimaryKeyColumns, List<String> newPrimaryKeyColumns) {
  List<String> statements = new ArrayList<>();

  if (!oldPrimaryKeyColumns.isEmpty()) {
    statements.add(dropPrimaryKey(table));
  }

  if (!newPrimaryKeyColumns.isEmpty()) {
    statements.add(new StringBuilder()
      .append("ALTER TABLE ").append(schemaNamePrefix()).append(table.getName()).append(" ADD ")
      .append(buildPrimaryKeyConstraint(table.getName(), newPrimaryKeyColumns))
      .toString()
    );
  }

  return statements;
}
3.68
hbase_SpaceQuotaSnapshot_notInViolation
/** Returns a singleton referring to a quota which is not in violation. */
public static SpaceQuotaStatus notInViolation() {
  return NOT_IN_VIOLATION;
}
3.68
hadoop_ColumnRWHelper_readResultsWithTimestamps
/**
 * @param result from which to read data with timestamps
 * @param columnPrefixBytes optional prefix to limit columns. If null all
 *          columns are returned.
 * @param <K> identifies the type of column name (indicated by type of key
 *          converter).
 * @param <V> the type of the values. The values will be cast into that type.
 * @param keyConverter used to convert column bytes to the appropriate key
 *          type.
 * @return the cell values at each respective time in the form
 *         {@literal {idA={timestamp1->value1}, idA={timestamp2->value2},
 *         idB={timestamp3->value3}, idC={timestamp1->value4}}}
 * @throws IOException if any problem occurs while reading results.
 */
@SuppressWarnings("unchecked")
public static <K, V> NavigableMap<K, NavigableMap<Long, V>> readResultsWithTimestamps(
    Result result, byte[] columnFamilyBytes, byte[] columnPrefixBytes,
    KeyConverter<K> keyConverter, ValueConverter valueConverter,
    boolean supplementTs) throws IOException {

  NavigableMap<K, NavigableMap<Long, V>> results = new TreeMap<>();

  if (result != null) {
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> resultMap =
        result.getMap();

    NavigableMap<byte[], NavigableMap<Long, byte[]>> columnCellMap =
        resultMap.get(columnFamilyBytes);
    // could be that there is no such column family.
    if (columnCellMap != null) {
      for (Map.Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap.entrySet()) {
        K converterColumnKey = null;
        if (columnPrefixBytes == null) {
          LOG.debug("null prefix was specified; returning all columns");
          try {
            converterColumnKey = keyConverter.decode(entry.getKey());
          } catch (IllegalArgumentException iae) {
            LOG.error("Illegal column found, skipping this column.", iae);
            continue;
          }
        } else {
          // A non-null prefix means columns are actually of the form
          // prefix!columnNameRemainder
          byte[][] columnNameParts = Separator.QUALIFIERS.split(entry.getKey(), 2);
          byte[] actualColumnPrefixBytes = columnNameParts[0];
          if (Bytes.equals(columnPrefixBytes, actualColumnPrefixBytes)
              && columnNameParts.length == 2) {
            try {
              // This is the prefix that we want
              converterColumnKey = keyConverter.decode(columnNameParts[1]);
            } catch (IllegalArgumentException iae) {
              LOG.error("Illegal column found, skipping this column.", iae);
              continue;
            }
          }
        }

        // If this column has the prefix we want
        if (converterColumnKey != null) {
          NavigableMap<Long, V> cellResults = new TreeMap<Long, V>();
          NavigableMap<Long, byte[]> cells = entry.getValue();
          if (cells != null) {
            for (Map.Entry<Long, byte[]> cell : cells.entrySet()) {
              V value = (V) valueConverter.decodeValue(cell.getValue());
              Long ts = supplementTs
                  ? TimestampGenerator.getTruncatedTimestamp(cell.getKey())
                  : cell.getKey();
              cellResults.put(ts, value);
            }
          }
          results.put(converterColumnKey, cellResults);
        }
      } // for entry : columnCellMap
    } // if columnCellMap != null
  } // if result != null
  return results;
}
3.68
framework_VDateTimeCalendarPanel_isTimeSelectorNeeded
/**
 * Do we need the time selector?
 *
 * @return True if it is required
 */
private boolean isTimeSelectorNeeded() {
    return getResolution().compareTo(DateTimeResolution.DAY) < 0;
}
3.68
framework_VCalendarPanel_handleNavigation
/**
 * Handles the keyboard navigation.
 *
 * @param keycode
 *            The key code that was pressed
 * @param ctrl
 *            Was the ctrl key pressed
 * @param shift
 *            Was the shift key pressed
 * @return Return true if key press was handled by the component, else
 *         return false
 */
protected boolean handleNavigation(int keycode, boolean ctrl, boolean shift) {
    if (!isEnabled() || isReadonly()) {
        return false;
    } else if (resolution == Resolution.YEAR) {
        return handleNavigationYearMode(keycode, ctrl, shift);
    } else if (resolution == Resolution.MONTH) {
        return handleNavigationMonthMode(keycode, ctrl, shift);
    } else if (resolution == Resolution.DAY) {
        return handleNavigationDayMode(keycode, ctrl, shift);
    } else {
        return handleNavigationDayMode(keycode, ctrl, shift);
    }
}
3.68
framework_BasicEvent_setStyleName
/*
 * (non-Javadoc)
 *
 * @see
 * com.vaadin.addon.calendar.event.CalendarEventEditor#setStyleName(java
 * .lang.String)
 */
@Override
public void setStyleName(String styleName) {
    this.styleName = styleName;
    fireEventChange();
}
3.68
hbase_TableResource_getName
/** Returns the table name */
String getName() {
  return table;
}
3.68
hbase_Constraints_getConstraints
/**
 * Get the constraints stored in the table descriptor
 * @param desc To read from
 * @param classloader To use when loading classes. If a special classloader is used on a region,
 *          for instance, then that should be the classloader used to load the
 *          constraints. This could also apply to unit-testing situations, where we want to
 *          ensure that a class is reloaded or not.
 * @return List of configured {@link Constraint Constraints}
 * @throws IOException if any part of reading/arguments fails
 */
static List<? extends Constraint> getConstraints(TableDescriptor desc, ClassLoader classloader)
  throws IOException {
  List<Constraint> constraints = new ArrayList<>();
  // loop through all the key, values looking for constraints
  for (Map.Entry<Bytes, Bytes> e : desc.getValues().entrySet()) {
    // read out the constraint
    String key = Bytes.toString(e.getKey().get()).trim();
    String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key);
    if (className.length == 2) {
      key = className[1];
      if (LOG.isDebugEnabled()) {
        LOG.debug("Loading constraint:" + key);
      }

      // read in the rest of the constraint
      Configuration conf;
      try {
        conf = readConfiguration(e.getValue().get());
      } catch (IOException e1) {
        // log that we don't have a valid configuration stored, and move on.
        LOG.warn("Corrupted configuration found for key:" + key + ", skipping it.");
        continue;
      }
      // if it is not enabled, skip it
      if (!conf.getBoolean(ENABLED_KEY, false)) {
        LOG.debug("Constraint: {} is DISABLED - skipping it", key);
        // go to the next constraint
        continue;
      }

      try {
        // add the constraint, now that we expect it to be valid.
        Class<? extends Constraint> clazz =
          classloader.loadClass(key).asSubclass(Constraint.class);
        Constraint constraint = clazz.getDeclaredConstructor().newInstance();
        constraint.setConf(conf);
        constraints.add(constraint);
      } catch (InvocationTargetException | NoSuchMethodException | ClassNotFoundException
        | InstantiationException | IllegalAccessException e1) {
        throw new IOException(e1);
      }
    }
  }
  // sort them, based on the priorities
  Collections.sort(constraints, constraintComparator);
  return constraints;
}
3.68
framework_DefaultEditorEventHandler_isChanged
/**
 * Returns whether the cursor move has either horizontal or vertical
 * changes.
 *
 * @return {@code true} if there are changes, {@code false} otherwise
 */
public boolean isChanged() {
    return rowDelta != 0 || colDelta != 0;
}
3.68
AreaShop_GeneralRegion_getWidth
/**
 * Get the width of the region (x-axis).
 * @return The width of the region (x-axis)
 */
@Override
public int getWidth() {
  if(getRegion() == null) {
    return 0;
  }
  return getMaximumPoint().getBlockX() - getMinimumPoint().getBlockX() + 1;
}
3.68
hadoop_BooleanWritable_set
/**
 * Set the value of the BooleanWritable.
 * @param value value.
 */
public void set(boolean value) {
  this.value = value;
}
3.68
zxing_PDF417_calculateNumberOfRows
/**
 * Calculates the necessary number of rows as described in annex Q of ISO/IEC 15438:2001(E).
 *
 * @param m the number of source codewords prior to the addition of the Symbol Length
 *          Descriptor and any pad codewords
 * @param k the number of error correction codewords
 * @param c the number of columns in the symbol in the data region (excluding start, stop and
 *          row indicator codewords)
 * @return the number of rows in the symbol (r)
 */
private static int calculateNumberOfRows(int m, int k, int c) {
  int r = ((m + 1 + k) / c) + 1;
  if (c * r >= (m + 1 + k + c)) {
    r--;
  }
  return r;
}
3.68
framework_VFilterSelect_selectNextItem
/**
 * Selects the next item in the filtered selections.
 */
public void selectNextItem() {
    debug("VFS.SP: selectNextItem()");
    final int index = menu.getSelectedIndex() + 1;
    if (menu.getItems().size() > index) {
        selectItem(menu.getItems().get(index));
    } else {
        selectNextPage();
    }
}
3.68
hbase_KeyValueHeap_compare
/**
 * Compares two KeyValues.
 * @return less than 0 if left is smaller, 0 if equal, etc.
 */
public int compare(Cell left, Cell right) {
  return this.kvComparator.compare(left, right);
}
3.68
druid_WallConfig_setDescribeAllow
/**
 * Set whether the MySQL DESCRIBE statement is allowed.
 *
 * @since 0.2.10
 */
public void setDescribeAllow(boolean describeAllow) {
  this.describeAllow = describeAllow;
}
3.68
framework_RefreshRenderedCellsOnlyIfAttached_getTicketNumber
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
 */
@Override
protected Integer getTicketNumber() {
    return 9138;
}
3.68
streampipes_OutputStrategies_append
/**
 * Creates a {@link org.apache.streampipes.model.output.AppendOutputStrategy}. Append output strategies add additional
 * properties to an input event stream.
 *
 * @param appendProperties An arbitrary number of event properties that are appended to any input stream.
 * @return AppendOutputStrategy
 */
public static AppendOutputStrategy append(EventProperty... appendProperties) {
  return new AppendOutputStrategy(Arrays.asList(appendProperties));
}
3.68
hadoop_JsonSerialization_toString
/**
 * Convert an instance to a string form for output. This is a robust
 * operation which will convert any JSON-generating exceptions into
 * error text.
 * @param instance non-null instance
 * @return a JSON string
 */
public String toString(T instance) {
  Preconditions.checkArgument(instance != null, "Null instance argument");
  try {
    return toJson(instance);
  } catch (JsonProcessingException e) {
    return "Failed to convert to a string: " + e;
  }
}
3.68
morf_SqlDialect_getAutoIncrementColumnForTable
/**
 * Scans the specified {@link Table} for any autonumbered columns and returns
 * that {@link Column} if it is found, or null otherwise.
 *
 * @param table The table to check.
 * @return The autonumber column, or null if none exists.
 */
protected Column getAutoIncrementColumnForTable(Table table) {
  for (Column column : table.columns()) {
    if (column.isAutoNumbered()) {
      return column;
    }
  }
  return null;
}
3.68
framework_ListSorter_getComparator
/**
 * Retrieve the comparator assigned for a specific grid column.
 *
 * @param <C>
 *            the column data type
 * @param column
 *            a grid column. May not be null.
 * @return a comparator, or null if no comparator for the specified grid
 *         column has been set.
 */
@SuppressWarnings("unchecked")
public <C> Comparator<C> getComparator(Grid.Column<C, T> column) {
    if (column == null) {
        throw new IllegalArgumentException(
                "Column reference can not be null");
    }
    return (Comparator<C>) comparators.get(column);
}
3.68
framework_TableElement_getCollapseMenuToggle
/**
 * Gets the button that shows or hides the collapse menu.
 *
 * @return button for opening collapse menu
 */
public WebElement getCollapseMenuToggle() {
    return findElement(By.className("v-table-column-selector"));
}
3.68
flink_RowtimeAttributeDescriptor_getAttributeName
/** Returns the name of the rowtime attribute. */
public String getAttributeName() {
    return attributeName;
}
3.68
hbase_HFile_createReader
/**
 * @param fs filesystem
 * @param path Path to file to read
 * @param cacheConf This must not be null.
 * @param primaryReplicaReader true if this is a reader for primary replica
 * @param conf Configuration
 * @return an active Reader instance
 * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile
 *           is corrupt/invalid.
 * @see CacheConfig#CacheConfig(Configuration)
 */
public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf,
  boolean primaryReplicaReader, Configuration conf) throws IOException {
  Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
  FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
  ReaderContext context =
    new ReaderContextBuilder().withFilePath(path).withInputStreamWrapper(stream)
      .withFileSize(fs.getFileStatus(path).getLen()).withFileSystem(stream.getHfs())
      .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD).build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  Reader reader = createReader(context, fileInfo, cacheConf, conf);
  fileInfo.initMetaAndIndex(reader);
  return reader;
}
3.68
flink_ClusterClient_listCompletedClusterDatasetIds
/**
 * Return a set of ids of the completed cluster datasets.
 *
 * @return A set of ids of the completely cached intermediate dataset.
 */
default CompletableFuture<Set<AbstractID>> listCompletedClusterDatasetIds() {
    return CompletableFuture.completedFuture(Collections.emptySet());
}
3.68
morf_SqlParameter_toString
/**
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
  return MoreObjects.toStringHelper(this)
      .add("name", name)
      .add("scale", scale)
      .add("width", width)
      .add("type", type)
      .toString() + super.toString();
}
3.68
flink_TableFactoryService_discoverFactories
/**
 * Searches for factories using Java service providers.
 *
 * @return all factories in the classpath
 */
private static List<TableFactory> discoverFactories(Optional<ClassLoader> classLoader) {
    try {
        List<TableFactory> result = new LinkedList<>();
        ClassLoader cl = classLoader.orElse(Thread.currentThread().getContextClassLoader());
        ServiceLoader.load(TableFactory.class, cl).iterator().forEachRemaining(result::add);
        return result;
    } catch (ServiceConfigurationError e) {
        LOG.error("Could not load service provider for table factories.", e);
        throw new TableException("Could not load service provider for table factories.", e);
    }
}
3.68
zxing_WhiteRectangleDetector_centerEdges
/**
 * Recenters the points by a constant distance towards the center.
 *
 * @param y bottom most point
 * @param z left most point
 * @param x right most point
 * @param t top most point
 * @return {@link ResultPoint}[] describing the corners of the rectangular
 *         region. The first and last points are opposed on the diagonal, as
 *         are the second and third. The first point will be the topmost
 *         point and the last, the bottommost. The second point will be
 *         leftmost and the third, the rightmost
 */
private ResultPoint[] centerEdges(ResultPoint y, ResultPoint z,
                                  ResultPoint x, ResultPoint t) {

  //
  //       t            t
  //  z                      x
  //        x    OR    z
  //       y            y
  //

  float yi = y.getX();
  float yj = y.getY();
  float zi = z.getX();
  float zj = z.getY();
  float xi = x.getX();
  float xj = x.getY();
  float ti = t.getX();
  float tj = t.getY();

  if (yi < width / 2.0f) {
    return new ResultPoint[]{
        new ResultPoint(ti - CORR, tj + CORR),
        new ResultPoint(zi + CORR, zj + CORR),
        new ResultPoint(xi - CORR, xj - CORR),
        new ResultPoint(yi + CORR, yj - CORR)};
  } else {
    return new ResultPoint[]{
        new ResultPoint(ti + CORR, tj + CORR),
        new ResultPoint(zi + CORR, zj - CORR),
        new ResultPoint(xi - CORR, xj + CORR),
        new ResultPoint(yi - CORR, yj - CORR)};
  }
}
3.68
flink_BuiltInFunctionDefinition_name
/**
 * Specifies a name that uniquely identifies a built-in function.
 *
 * <p>Please adhere to the following naming convention:
 *
 * <ul>
 *   <li>Use upper case and separate words with underscore.
 *   <li>Depending on the importance of the function, the underscore is sometimes omitted
 *       e.g. for {@code IFNULL} or {@code TYPEOF} but not for {@code TO_TIMESTAMP_LTZ}.
 *   <li>Internal functions must start with $ and include a version starting from 1. The
 *       following format is enforced: {@code $NAME$VERSION} such as {@code
 *       $REPLICATE_ROWS$1}.
 * </ul>
 */
public Builder name(String name) {
    this.name = name;
    return this;
}
3.68
hadoop_ManifestCommitter_isRecoverySupported
/**
 * Declare that task recovery is not supported.
 * It would be, if someone added the code *and tests*.
 * @param jobContext
 *          Context of the job whose output is being written.
 * @return false, always
 * @throws IOException never
 */
@Override
public boolean isRecoverySupported(final JobContext jobContext) throws IOException {
  LOG.info("Probe for isRecoverySupported({}): returning false",
      jobContext.getJobID());
  return false;
}
3.68
flink_HiveParserUtils_isRegex
/**
 * Returns whether the pattern is a regex expression (instead of a normal string). Normal string
 * is a string with all alphabets/digits and "_".
 */
public static boolean isRegex(String pattern, HiveConf conf) {
    String qIdSupport = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUOTEDID_SUPPORT);
    if ("column".equals(qIdSupport)) {
        return false;
    }
    for (int i = 0; i < pattern.length(); i++) {
        if (!Character.isLetterOrDigit(pattern.charAt(i)) && pattern.charAt(i) != '_') {
            return true;
        }
    }
    return false;
}
3.68
dubbo_InjvmExporterListener_addExporterChangeListener
/**
 * Adds an ExporterChangeListener for a specific service, and notifies the listener of the
 * current Exporter instance, if one exists.
 *
 * @param listener   The ExporterChangeListener to add.
 * @param serviceKey The service key for the service to listen for changes on.
 */
public synchronized void addExporterChangeListener(ExporterChangeListener listener, String serviceKey) {
  exporterChangeListeners.putIfAbsent(serviceKey, new ConcurrentHashSet<>());
  exporterChangeListeners.get(serviceKey).add(listener);
  if (exporters.get(serviceKey) != null) {
    Exporter<?> exporter = exporters.get(serviceKey);
    listener.onExporterChangeExport(exporter);
  }
}
3.68
hbase_BaseSourceImpl_decGauge
/**
 * Decrease the value of a named gauge.
 * @param gaugeName The name of the gauge.
 * @param delta the amount to subtract from a gauge value.
 */
@Override
public void decGauge(String gaugeName, long delta) {
  MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L);
  gaugeInt.decr(delta);
}
3.68
hmily_HmilyLogo_logo
/**
 * Logo.
 */
public void logo() {
  String bannerText = buildBannerText();
  if (LOGGER.isInfoEnabled()) {
    LOGGER.info(bannerText);
  } else {
    System.out.print(bannerText);
  }
}
3.68
hbase_RegionServerAccounting_getRetainedRegionRWRequestsCnt
/** Returns the retained metrics of region's read and write requests count */
protected ConcurrentMap<String, Pair<Long, Long>> getRetainedRegionRWRequestsCnt() {
  return this.retainedRegionRWRequestsCnt;
}
3.68
hadoop_ProgressSplitsBlock_burst
// this coordinates with LoggedTaskAttempt.SplitVectorKind
int[][] burst() {
  int[][] result = new int[4][];

  result[WALLCLOCK_TIME_INDEX] = progressWallclockTime.getValues();
  result[CPU_TIME_INDEX] = progressCPUTime.getValues();
  result[VIRTUAL_MEMORY_KBYTES_INDEX] = progressVirtualMemoryKbytes.getValues();
  result[PHYSICAL_MEMORY_KBYTES_INDEX] = progressPhysicalMemoryKbytes.getValues();

  return result;
}
3.68
hbase_HRegionServer_getCompactSplitThread
/** Returns the underlying {@link CompactSplit} for the servers */
public CompactSplit getCompactSplitThread() {
  return this.compactSplitThread;
}
3.68
morf_UpdateStatementBuilder_set
/**
 * Specifies the fields to set.
 *
 * @param destinationFields the fields to update in the database table
 * @return this, for method chaining.
 */
public UpdateStatementBuilder set(AliasedFieldBuilder... destinationFields) {
  this.fields.addAll(Builder.Helper.buildAll(Arrays.asList(destinationFields)));
  return this;
}
3.68
rocketmq-connect_MqttSinkTask_start
/**
 * @param props
 */
@Override
public void start(KeyValue props) {
  try {
    ConfigUtil.load(props, this.sinkConnectConfig);
    log.info("init data source success");
  } catch (Exception e) {
    log.error("Cannot start MQTT Sink Task because of configuration error", e);
  }
  try {
    updater = new Updater(sinkConnectConfig);
    updater.start();
  } catch (Throwable e) {
    log.error("fail to start updater", e);
  }
}
3.68
hbase_BalanceRequest_isDryRun
/**
 * Returns true if the balancer should run in dry run mode, otherwise false. In dry run mode,
 * moves will be calculated but not executed.
 */
public boolean isDryRun() {
  return dryRun;
}
3.68
morf_Function_round
/**
 * Rounding result for all of the below databases is equivalent to the Java RoundingMode#HALF_UP
 * for the datatypes which are currently in use at Alfa, where rounding is performed to the nearest integer
 * unless rounding 0.5 in which case the number is rounded up. Note that this is the rounding mode
 * commonly taught at school.
 *
 * <p>Example : 3.2 rounds to 3 and 3.5 rounds to 4.</p>
 *
 * <table>
 * <caption>Database rounding references</caption>
 * <tr><th>Database</th><th>Database Manual</th></tr>
 * <tr><td>Oracle</td><td>http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions135.htm</td></tr>
 * <tr><td>MySQL</td><td>http://dev.mysql.com/doc/refman/5.0/en/mathematical-functions.html#function_round</td></tr>
 * <tr><td>SQLServer</td><td>http://technet.microsoft.com/en-us/library/ms175003.aspx</td></tr>
 * <tr><td>Db2400</td><td>http://publib.boulder.ibm.com/infocenter/db2luw/v9/index.jsp?topic=%2Fcom.ibm.db2.udb.admin.doc%2Fdoc%2Fr0000845.htm</td></tr>
 * <tr><td>H2</td><td>http://www.h2database.com/html/functions.html#round</td></tr>
 * </table>
 *
 * @param expression the expression to evaluate
 * @param number an expression evaluating to the number of decimal places to round the expression to
 * @return an instance of the round function
 */
public static Function round(AliasedField expression, AliasedField number) {
  return new Function(FunctionType.ROUND, expression, number);
}
3.68
hbase_RecoverableZooKeeper_setData
/**
 * setData is NOT an idempotent operation. A retry may cause a BADVERSION exception. An
 * identifier field is added into the data to check whether a BADVERSION is caused by a
 * previous setData that actually succeeded.
 * @return Stat instance
 */
public Stat setData(String path, byte[] data, int version)
  throws KeeperException, InterruptedException {
  final Span span = TraceUtil.createSpan("RecoverableZookeeper.setData");
  try (Scope ignored = span.makeCurrent()) {
    RetryCounter retryCounter = retryCounterFactory.create();
    byte[] newData = ZKMetadata.appendMetaData(id, data);
    boolean isRetry = false;
    while (true) {
      try {
        span.setStatus(StatusCode.OK);
        return checkZk().setData(path, newData, version);
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case OPERATIONTIMEOUT:
          case REQUESTTIMEOUT:
            TraceUtil.setError(span, e);
            retryOrThrow(retryCounter, e, "setData");
            break;
          case BADVERSION:
            if (isRetry) {
              // try to verify whether the previous setData succeeded or not
              try {
                Stat stat = new Stat();
                byte[] revData = checkZk().getData(path, false, stat);
                if (Bytes.compareTo(revData, newData) == 0) {
                  // the bad version is caused by previous successful setData
                  return stat;
                }
              } catch (KeeperException keeperException) {
                // the ZK is not reliable at this moment. just throwing exception
                TraceUtil.setError(span, e);
                throw keeperException;
              }
            }
            // throw other exceptions and verified bad version exceptions
          default:
            TraceUtil.setError(span, e);
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
      isRetry = true;
    }
  } finally {
    span.end();
  }
}
3.68
framework_AbstractContainer_fireContainerPropertySetChange
/**
 * Sends a Property set change event to all interested listeners.
 *
 * Use {@link #fireContainerPropertySetChange()} instead of this method
 * unless additional information about the exact changes is available and
 * should be included in the event.
 *
 * @param event
 *            the property change event to send, optionally with additional
 *            information
 */
protected void fireContainerPropertySetChange(
        Container.PropertySetChangeEvent event) {
    if (getPropertySetChangeListeners() != null) {
        for (Object l : getPropertySetChangeListeners().toArray()) {
            ((Container.PropertySetChangeListener) l)
                    .containerPropertySetChange(event);
        }
    }
}
3.68
hbase_MasterObserver_postCompletedModifyTableAction
/**
 * Called after modifying a table's properties. Called as part of modify table procedure and it
 * is async to the modify table RPC call.
 * @param ctx the environment to interact with the framework and master
 * @param tableName the name of the table
 * @param oldDescriptor descriptor of table before modify operation happened
 * @param currentDescriptor current TableDescriptor of the table
 */
default void postCompletedModifyTableAction(
  final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName,
  final TableDescriptor oldDescriptor, final TableDescriptor currentDescriptor)
  throws IOException {
}
3.68
flink_FinalizeOnMaster_finalizeGlobal
/**
 * The method is invoked on the master (JobManager) after all (parallel) instances of an
 * OutputFormat finished.
 *
 * @param context The context to get finalization infos.
 * @throws IOException The finalization may throw exceptions, which may cause the job to abort.
 */
default void finalizeGlobal(FinalizationContext context) throws IOException {
    finalizeGlobal(context.getParallelism());
}
3.68
flink_SharedResourceHolder_release
/**
 * Releases an instance of the given resource.
 *
 * <p>The instance must have been obtained from {@link #get(Resource)}. Otherwise will throw
 * IllegalArgumentException.
 *
 * <p>Caller must not release a reference more than once. It's advisory that you clear the
 * reference to the instance with the null returned by this method.
 *
 * @param resource the singleton Resource object that identifies the released static resource
 * @param instance the released static resource
 * @return a null which the caller can use to clear the reference to that instance.
 */
public static <T> T release(final Resource<T> resource, final T instance) {
    return holder.releaseInternal(resource, instance);
}
3.68
flink_WindowTrigger_triggerTime
/**
 * Returns the trigger time of the window. This should be called after the TriggerContext has
 * been initialized.
 */
protected long triggerTime(W window) {
    return toEpochMillsForTimer(window.maxTimestamp(), ctx.getShiftTimeZone());
}
3.68
hbase_QuotaSettingsFactory_throttleRegionServer
/**
 * Throttle the specified region server.
 * @param regionServer the region server to throttle
 * @param type the type of throttling
 * @param limit the allowed number of request/data per timeUnit
 * @param timeUnit the limit time unit
 * @return the quota settings
 */
public static QuotaSettings throttleRegionServer(final String regionServer,
  final ThrottleType type, final long limit, final TimeUnit timeUnit) {
  return throttle(null, null, null, regionServer, type, limit, timeUnit, QuotaScope.MACHINE);
}
3.68
hbase_ColumnSchemaModel___getVersions
/** Returns the value of the VERSIONS attribute or its default if it is unset */
public int __getVersions() {
  Object o = attrs.get(VERSIONS);
  return o != null
    ? Integer.parseInt(o.toString())
    : ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS;
}
3.68
flink_DataSet_crossWithTiny
/**
 * Initiates a Cross transformation.
 *
 * <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
 * DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
 * Cartesian product. This method also gives the hint to the optimizer that the second DataSet
 * to cross is much smaller than the first one.
 *
 * <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps
 * each pair of crossed elements into a {@link Tuple2}, with the element of the first input
 * being the first field of the tuple and the element of the second input being the second field
 * of the tuple.
 *
 * <p>Call {@link
 * org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)}
 * to define a {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
 * each pair of crossed elements. The CrossFunction returns exactly one element for each pair
 * of input elements.
 *
 * @param other The other DataSet with which this DataSet is crossed.
 * @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
 * @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
 * @see org.apache.flink.api.common.functions.CrossFunction
 * @see DataSet
 * @see Tuple2
 */
public <R> CrossOperator.DefaultCross<T, R> crossWithTiny(DataSet<R> other) {
    return new CrossOperator.DefaultCross<>(
            this, other, CrossHint.SECOND_IS_SMALL, Utils.getCallLocationName());
}
3.68
framework_VaadinPortletSession_getPortletConfig
/**
 * Returns the JSR-286 portlet configuration that provides access to the
 * portlet context and init parameters.
 *
 * @return portlet configuration
 */
public PortletConfig getPortletConfig() {
    VaadinPortletResponse response = (VaadinPortletResponse) CurrentInstance
            .get(VaadinResponse.class);
    return response.getService().getPortlet().getPortletConfig();
}
3.68
flink_Execution_fail
/**
 * This method fails the vertex due to an external condition. The task will move to state
 * FAILED. If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to
 * the TaskManager.
 *
 * @param t The exception that caused the task to fail.
 */
@Override
public void fail(Throwable t) {
    processFail(t, true);
}
3.68
hadoop_MultipleOutputFormat_getRecordWriter
/**
 * Create a composite record writer that can write key/value data to different
 * output files
 *
 * @param fs
 *          the file system to use
 * @param job
 *          the job conf for the job
 * @param name
 *          the leaf file name for the output file (such as part-00000)
 * @param arg3
 *          a progressable for reporting progress.
 * @return a composite record writer
 * @throws IOException
 */
public RecordWriter<K, V> getRecordWriter(FileSystem fs, JobConf job,
    String name, Progressable arg3) throws IOException {

  final FileSystem myFS = fs;
  final String myName = generateLeafFileName(name);
  final JobConf myJob = job;
  final Progressable myProgressable = arg3;

  return new RecordWriter<K, V>() {

    // a cache storing the record writers for different output files.
    TreeMap<String, RecordWriter<K, V>> recordWriters = new TreeMap<String, RecordWriter<K, V>>();

    public void write(K key, V value) throws IOException {

      // get the file name based on the key
      String keyBasedPath = generateFileNameForKeyValue(key, value, myName);

      // get the file name based on the input file name
      String finalPath = getInputFileBasedOutputFileName(myJob, keyBasedPath);

      // get the actual key
      K actualKey = generateActualKey(key, value);
      V actualValue = generateActualValue(key, value);

      RecordWriter<K, V> rw = this.recordWriters.get(finalPath);
      if (rw == null) {
        // if we don't have the record writer yet for the final path, create
        // one
        // and add it to the cache
        rw = getBaseRecordWriter(myFS, myJob, finalPath, myProgressable);
        this.recordWriters.put(finalPath, rw);
      }
      rw.write(actualKey, actualValue);
    };

    public void close(Reporter reporter) throws IOException {
      Iterator<String> keys = this.recordWriters.keySet().iterator();
      while (keys.hasNext()) {
        RecordWriter<K, V> rw = this.recordWriters.get(keys.next());
        rw.close(reporter);
      }
      this.recordWriters.clear();
    };
  };
}
3.68
flink_DistributedCache_parseCachedFilesFromString
/**
 * Parses a list of distributed cache entries encoded in a string. Can be used to parse a config
 * option described by {@link org.apache.flink.configuration.PipelineOptions#CACHED_FILES}.
 *
 * <p>See {@link org.apache.flink.configuration.PipelineOptions#CACHED_FILES} for the format.
 *
 * @param files List of string encoded distributed cache entries.
 */
public static List<Tuple2<String, DistributedCacheEntry>> parseCachedFilesFromString(
        List<String> files) {
    return files.stream()
            .map(ConfigurationUtils::parseMap)
            .map(
                    m ->
                            Tuple2.of(
                                    m.get("name"),
                                    new DistributedCacheEntry(
                                            m.get("path"),
                                            Optional.ofNullable(m.get("executable"))
                                                    .map(Boolean::parseBoolean)
                                                    .orElse(false))))
            .collect(Collectors.toList());
}
3.68
hbase_RemoteProcedureDispatcher_addNode
// ============================================================================================
//  Node Helpers
// ============================================================================================
/**
 * Add a node that will be able to execute remote procedures
 * @param key the node identifier
 */
public void addNode(final TRemote key) {
  assert key != null : "Tried to add a node with a null key";
  nodeMap.computeIfAbsent(key, k -> new BufferNode(k));
}
3.68
morf_UpgradeTableResolutionVisitor_getResolvedTables
/**
 * @return tables resolved by this visitor
 */
public ResolvedTables getResolvedTables() {
  return resolvedTables;
}
3.68
hudi_MetadataPartitionType_getMetadataPartitionsNeedingWriteStatusTracking
/**
 * Returns the list of metadata table partitions which require WriteStatus to track written records.
 * <p>
 * These partitions need the list of written records so that they can update their metadata.
 */
public static List<MetadataPartitionType> getMetadataPartitionsNeedingWriteStatusTracking() {
  return Collections.singletonList(MetadataPartitionType.RECORD_INDEX);
}
3.68
hadoop_CosNFileSystem_append
/**
 * This optional operation is not yet supported.
 */
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
    throws IOException {
  throw new IOException("Not supported");
}
3.68
querydsl_BeanMap_entrySet
/**
 * Gets a Set of MapEntry objects that are the mappings for this BeanMap.
 * <p>
 * Each MapEntry can be set but not removed.
 *
 * @return the unmodifiable set of mappings
 */
@Override
public Set<Entry<String, Object>> entrySet() {
  return new AbstractSet<Entry<String, Object>>() {
    @Override
    public Iterator<Entry<String, Object>> iterator() {
      return entryIterator();
    }
    @Override
    public int size() {
      return BeanMap.this.readMethods.size();
    }
  };
}
3.68
flink_RestartStrategies_exponentialDelayRestart
/**
 * Generates an ExponentialDelayRestartStrategyConfiguration.
 *
 * @param initialBackoff Starting duration between restarts
 * @param maxBackoff The highest possible duration between restarts
 * @param backoffMultiplier Multiplier specifying how many times the delay is longer than before
 * @param resetBackoffThreshold How long the job must run smoothly to reset the time interval
 * @param jitterFactor How much the delay may differ (in percentage)
 */
public static ExponentialDelayRestartStrategyConfiguration exponentialDelayRestart(
        Time initialBackoff,
        Time maxBackoff,
        double backoffMultiplier,
        Time resetBackoffThreshold,
        double jitterFactor) {
    return new ExponentialDelayRestartStrategyConfiguration(
            initialBackoff, maxBackoff, backoffMultiplier, resetBackoffThreshold, jitterFactor);
}
3.68
flink_CollectionUtil_computeRequiredCapacity
/**
 * Helper method to compute the right capacity for a hash map with load factor
 * HASH_MAP_DEFAULT_LOAD_FACTOR.
 */
@VisibleForTesting
static int computeRequiredCapacity(int expectedSize, float loadFactor) {
    Preconditions.checkArgument(expectedSize >= 0);
    Preconditions.checkArgument(loadFactor > 0f);
    if (expectedSize <= 2) {
        return expectedSize + 1;
    }
    return expectedSize < (Integer.MAX_VALUE / 2 + 1)
            ? (int) Math.ceil(expectedSize / loadFactor)
            : Integer.MAX_VALUE;
}
3.68
framework_Slot_setRelativeWidth
/**
 * Set if the slot has a relative width.
 *
 * @param relativeWidth
 *            True if slot uses relative width, false if the slot has a
 *            static width
 */
public void setRelativeWidth(boolean relativeWidth) {
    this.relativeWidth = relativeWidth;
    updateRelativeSize(relativeWidth, "width");
}
3.68
framework_Video_getPoster
/**
 * @return The poster image.
 */
public Resource getPoster() {
    return getResource(VideoConstants.POSTER_RESOURCE);
}
3.68
flink_NullableSerializer_checkIfNullSupported
/**
 * This method checks if {@code serializer} supports {@code null} value.
 *
 * @param serializer serializer to check
 */
public static <T> boolean checkIfNullSupported(@Nonnull TypeSerializer<T> serializer) {
    int length = serializer.getLength() > 0 ? serializer.getLength() : 1;
    DataOutputSerializer dos = new DataOutputSerializer(length);
    try {
        serializer.serialize(null, dos);
    } catch (IOException | RuntimeException e) {
        return false;
    }
    checkArgument(
            serializer.getLength() < 0
                    || serializer.getLength() == dos.getCopyOfBuffer().length,
            "The serialized form of the null value should have the same length "
                    + "as any other if the length is fixed in the serializer");
    DataInputDeserializer dis = new DataInputDeserializer(dos.getSharedBuffer());
    try {
        checkArgument(serializer.deserialize(dis) == null);
    } catch (IOException e) {
        throw new RuntimeException(
                String.format(
                        "Unexpected failure to deserialize just serialized null value with %s",
                        serializer.getClass().getName()),
                e);
    }
    checkArgument(
            serializer.copy(null) == null,
            "Serializer %s has to be able properly copy null value if it can serialize it",
            serializer.getClass().getName());
    return true;
}
3.68
hbase_StoreScanner_trySkipToNextColumn
/**
 * See {@link org.apache.hadoop.hbase.regionserver.StoreScanner#trySkipToNextRow(Cell)}
 * @param cell current cell
 * @return true means skip to next column, false means not
 */
protected boolean trySkipToNextColumn(Cell cell) throws IOException {
  Cell nextCell = null;
  // used to guard against a changed next indexed key by doing an identity comparison
  // when the identity changes we need to compare the bytes again
  Cell previousIndexedKey = null;
  do {
    Cell nextIndexedKey = getNextIndexedKey();
    if (
      nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY
        && (nextIndexedKey == previousIndexedKey
          || matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)
    ) {
      this.heap.next();
      ++kvsScanned;
      previousIndexedKey = nextIndexedKey;
    } else {
      return false;
    }
  } while ((nextCell = this.heap.peek()) != null && CellUtil.matchingRowColumn(cell, nextCell));
  // We need this check because it may happen that the new scanner that we get
  // during heap.next() is requiring reseek due to a fake KV previously generated for
  // ROWCOL bloom filter optimization. See HBASE-19863 for more details
  if (
    useRowColBloom && nextCell != null && cell.getTimestamp() == PrivateConstants.OLDEST_TIMESTAMP
  ) {
    return false;
  }
  return true;
}
3.68
dubbo_StringUtils_isNotEmpty
/**
 * is not empty string.
 *
 * @param str source string.
 * @return is not empty.
 */
public static boolean isNotEmpty(String str) {
  return !isEmpty(str);
}
3.68
hadoop_DataNodeFaultInjector_delayWhenOfferServiceHoldLock
/**
 * Used as a hook to inject an intercept while the BPOfferService holds its lock.
 */
public void delayWhenOfferServiceHoldLock() {}
3.68
hadoop_ExtensionHelper_getCanonicalServiceName
/**
 * Invoke {@link BoundDTExtension#getCanonicalServiceName()} or
 * return the default value.
 * @param extension extension to invoke
 * @param def default if the class is of the wrong type.
 * @return a canonical service name.
 */
public static String getCanonicalServiceName(Object extension, String def) {
  return ifBoundDTExtension(extension, BoundDTExtension::getCanonicalServiceName)
      .orElse(def);
}
3.68
flink_InputFormatSourceFunction_getFormat
/**
 * Returns the {@code InputFormat}. This is only needed because we need to set the input split
 * assigner on the {@code StreamGraph}.
 */
public InputFormat<OUT, InputSplit> getFormat() {
    return format;
}
3.68
flink_JavaFieldPredicates_isNotStatic
/**
 * Matches a {@link JavaField} that has no static modifier.
 *
 * @return A {@link DescribedPredicate} returning true, if and only if the tested {@link
 *     JavaField} has no static modifier.
 */
public static DescribedPredicate<JavaField> isNotStatic() {
    return DescribedPredicate.describe(
            "not static", field -> !field.getModifiers().contains(JavaModifier.STATIC));
}
3.68
hudi_AvroSchemaCompatibility_getResult
/**
 * Gets more details about the compatibility, in particular if getType() is
 * INCOMPATIBLE.
 *
 * @return the details of this compatibility check.
 */
public SchemaCompatibilityResult getResult() {
  return mResult;
}
3.68
hudi_DatePartitionPathSelector_pruneDatePartitionPaths
/**
 * Prunes date level partitions to last few days configured by 'NUM_PREV_DAYS_TO_LIST' from
 * 'CURRENT_DATE'. Parallelizes listing by leveraging HoodieSparkEngineContext's methods.
 */
public List<String> pruneDatePartitionPaths(HoodieSparkEngineContext context, FileSystem fs,
    String rootPath, LocalDate currentDate) {
  List<String> partitionPaths = new ArrayList<>();
  // get all partition paths before date partition level
  partitionPaths.add(rootPath);
  if (datePartitionDepth <= 0) {
    return partitionPaths;
  }
  SerializableConfiguration serializedConf = new SerializableConfiguration(fs.getConf());
  for (int i = 0; i < datePartitionDepth; i++) {
    partitionPaths = context.flatMap(partitionPaths, path -> {
      Path subDir = new Path(path);
      FileSystem fileSystem = subDir.getFileSystem(serializedConf.get());
      // skip files/dirs whose names start with (_, ., etc)
      FileStatus[] statuses = fileSystem.listStatus(subDir,
          file -> IGNORE_FILEPREFIX_LIST.stream().noneMatch(pfx -> file.getName().startsWith(pfx)));
      List<String> res = new ArrayList<>();
      for (FileStatus status : statuses) {
        res.add(status.getPath().toString());
      }
      return res.stream();
    }, partitionsListParallelism);
  }

  // Prune date partitions to last few days
  return context.getJavaSparkContext().parallelize(partitionPaths, partitionsListParallelism)
      .filter(s -> {
        LocalDate fromDate = currentDate.minusDays(numPrevDaysToList);
        String[] splits = s.split("/");
        String datePartition = splits[splits.length - 1];
        LocalDate partitionDate;
        DateTimeFormatter dateFormatter = DateTimeFormatter.ofPattern(dateFormat);
        if (datePartition.contains("=")) {
          String[] moreSplit = datePartition.split("=");
          ValidationUtils.checkArgument(
              moreSplit.length == 2,
              "Partition Field (" + datePartition + ") not in expected format");
          partitionDate = LocalDate.parse(moreSplit[1], dateFormatter);
        } else {
          partitionDate = LocalDate.parse(datePartition, dateFormatter);
        }
        return (partitionDate.isEqual(fromDate) || partitionDate.isAfter(fromDate))
            && (partitionDate.isEqual(currentDate) || partitionDate.isBefore(currentDate));
      }).collect();
}
3.68
hmily_HmilySafeNumberOperationUtils_safeEquals
/**
 * Execute collection equals method by safe mode.
 *
 * @param sourceCollection source collection
 * @param targetCollection target collection
 * @return whether the elements in source collection and target collection are all the same
 */
public static boolean safeEquals(final Collection<Comparable<?>> sourceCollection, final Collection<Comparable<?>> targetCollection) {
    List<Comparable<?>> collection = Lists.newArrayList(sourceCollection);
    collection.addAll(targetCollection);
    Class<?> clazz = getTargetNumericType(collection);
    if (null == clazz) {
        return sourceCollection.equals(targetCollection);
    }
    List<Comparable<?>> sourceClazzCollection = sourceCollection.stream()
            .map(number -> parseNumberByClazz(number.toString(), clazz))
            .collect(Collectors.toList());
    List<Comparable<?>> targetClazzCollection = targetCollection.stream()
            .map(number -> parseNumberByClazz(number.toString(), clazz))
            .collect(Collectors.toList());
    return sourceClazzCollection.equals(targetClazzCollection);
}
3.68
querydsl_NumberExpression_goe
/**
 * Create a {@code this >= right} expression
 *
 * @param <A>
 * @param right rhs of the comparison
 * @return {@code this >= right}
 * @see java.lang.Comparable#compareTo(Object)
 */
public final <A extends Number & Comparable<?>> BooleanExpression goe(Expression<A> right) {
    return Expressions.booleanOperation(Ops.GOE, mixin, right);
}
3.68
flink_BaseHybridHashTable_nextSegment
/**
 * This is the method called by the partitions to request memory to serialize records. It
 * automatically spills partitions, if memory runs out.
 *
 * @return The next available memory segment.
 */
@Override
public MemorySegment nextSegment() {
    final MemorySegment seg = getNextBuffer();
    if (seg != null) {
        return seg;
    } else {
        try {
            spillPartition();
        } catch (IOException ioex) {
            throw new RuntimeException(
                    "Error spilling Hash Join Partition"
                            + (ioex.getMessage() == null ? "." : ": " + ioex.getMessage()),
                    ioex);
        }

        MemorySegment fromSpill = getNextBuffer();
        if (fromSpill == null) {
            throw new RuntimeException(
                    "BUG in Hybrid Hash Join: Spilling did not free a buffer.");
        } else {
            return fromSpill;
        }
    }
}
3.68
hbase_HBaseTestingUtility_waitUntilAllRegionsAssigned
/**
 * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
 * timeout. This means all regions have been deployed, master has been informed and updated
 * hbase:meta with the regions deployed server.
 * @param tableName the table name
 * @param timeout timeout, in milliseconds
 */
public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
  throws IOException {
  if (!TableName.isMetaTableName(tableName)) {
    try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
      LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = "
        + timeout + "ms");
      waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
        @Override
        public String explainFailure() throws IOException {
          return explainTableAvailability(tableName);
        }

        @Override
        public boolean evaluate() throws IOException {
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          boolean tableFound = false;
          try (ResultScanner s = meta.getScanner(scan)) {
            for (Result r; (r = s.next()) != null;) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              RegionInfo info = RegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                // Get server hosting this region from catalog family. Return false if no server
                // hosting this region, or if the server hosting this region was recently killed
                // (for fault tolerance testing).
                tableFound = true;
                byte[] server =
                  r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                if (server == null) {
                  return false;
                } else {
                  byte[] startCode =
                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                  ServerName serverName =
                    ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + ","
                      + Bytes.toLong(startCode));
                  if (
                    !getHBaseClusterInterface().isDistributedCluster()
                      && getHBaseCluster().isKilledRS(serverName)
                  ) {
                    return false;
                  }
                }
                if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
                  return false;
                }
              }
            }
          }
          if (!tableFound) {
            LOG.warn(
              "Didn't find the entries for table " + tableName + " in meta, already deleted?");
          }
          return tableFound;
        }
      });
    }
  }
  LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
  // check from the master state if we are using a mini cluster
  if (!getHBaseClusterInterface().isDistributedCluster()) {
    // So, all regions are in the meta table but make sure master knows of the assignments before
    // returning -- sometimes this can lag.
    HMaster master = getHBaseCluster().getMaster();
    final RegionStates states = master.getAssignmentManager().getRegionStates();
    waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableAvailability(tableName);
      }

      @Override
      public boolean evaluate() throws IOException {
        List<RegionInfo> hris = states.getRegionsOfTable(tableName);
        return hris != null && !hris.isEmpty();
      }
    });
  }
  LOG.info("All regions for table " + tableName + " assigned.");
}
3.68
hibernate-validator_ValidatorImpl_validateCascadedConstraints
/**
 * Validates all cascaded constraints for the given bean using the current group set in the execution context.
 * This method must always be called after validateConstraints for the same context.
 *
 * @param validationContext The execution context
 * @param valueContext Collected information for single validation
 */
private void validateCascadedConstraints(BaseBeanValidationContext<?> validationContext, ValueContext<?, Object> valueContext) {
    Validatable validatable = valueContext.getCurrentValidatable();
    BeanValueContext.ValueState<Object> originalValueState = valueContext.getCurrentValueState();

    for ( Cascadable cascadable : validatable.getCascadables() ) {
        valueContext.appendNode( cascadable );

        if ( isCascadeRequired( validationContext, valueContext.getCurrentBean(), valueContext.getPropertyPath(),
                cascadable.getConstraintLocationKind() ) ) {
            Object value = getCascadableValue( validationContext, valueContext.getCurrentBean(), cascadable );
            CascadingMetaData cascadingMetaData = cascadable.getCascadingMetaData();

            if ( value != null ) {
                CascadingMetaData effectiveCascadingMetaData = cascadingMetaData.addRuntimeContainerSupport( valueExtractorManager, value.getClass() );

                // validate cascading on the annotated object
                if ( effectiveCascadingMetaData.isCascading() ) {
                    validateCascadedAnnotatedObjectForCurrentGroup( value, validationContext, valueContext, effectiveCascadingMetaData );
                }

                if ( effectiveCascadingMetaData.isContainer() ) {
                    ContainerCascadingMetaData containerCascadingMetaData = effectiveCascadingMetaData.as( ContainerCascadingMetaData.class );

                    if ( containerCascadingMetaData.hasContainerElementsMarkedForCascading() ) {
                        // validate cascading on the container elements
                        validateCascadedContainerElementsForCurrentGroup( value, validationContext, valueContext,
                                containerCascadingMetaData.getContainerElementTypesCascadingMetaData() );
                    }
                }
            }
        }

        // reset the value context
        valueContext.resetValueState( originalValueState );
    }
}
3.68
pulsar_ProducerImpl_recoverProcessOpSendMsgFrom
// Must acquire a lock on ProducerImpl.this before calling method.
private void recoverProcessOpSendMsgFrom(ClientCnx cnx, MessageImpl from, long expectedEpoch) {
    if (expectedEpoch != this.connectionHandler.getEpoch() || cnx() == null) {
        // In this case, the cnx passed to this method is no longer the active connection. This method will get
        // called again once the new connection registers the producer with the broker.
        log.info("[{}][{}] Producer epoch mismatch or the current connection is null. Skip re-sending the "
                + " {} pending messages since they will deliver using another connection.", topic, producerName,
                pendingMessages.messagesCount());
        return;
    }
    final boolean stripChecksum = cnx.getRemoteEndpointProtocolVersion() < brokerChecksumSupportedVersion();
    Iterator<OpSendMsg> msgIterator = pendingMessages.iterator();
    OpSendMsg pendingRegisteringOp = null;
    while (msgIterator.hasNext()) {
        OpSendMsg op = msgIterator.next();
        if (from != null) {
            if (op.msg == from) {
                from = null;
            } else {
                continue;
            }
        }
        if (op.msg != null) {
            if (op.msg.getSchemaState() == None) {
                if (!rePopulateMessageSchema(op.msg)) {
                    pendingRegisteringOp = op;
                    break;
                }
            } else if (op.msg.getSchemaState() == Broken) {
                op.recycle();
                msgIterator.remove();
                continue;
            }
        }
        if (op.cmd == null) {
            checkState(op.rePopulate != null);
            op.rePopulate.run();
            if (isMessageSizeExceeded(op)) {
                continue;
            }
        }
        if (stripChecksum) {
            stripChecksum(op);
        }
        op.cmd.retain();
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] Re-Sending message in cnx {}, sequenceId {}", topic, producerName,
                    cnx.channel(), op.sequenceId);
        }
        cnx.ctx().write(op.cmd, cnx.ctx().voidPromise());
        op.updateSentTimestamp();
        stats.updateNumMsgsSent(op.numMessagesInBatch, op.batchSizeByte);
    }
    cnx.ctx().flush();
    if (!changeToReadyState()) {
        // Producer was closed while reconnecting, close the connection to make sure the broker
        // drops the producer on its side
        cnx.channel().close();
        return;
    }
    // If any messages were enqueued while the producer was not Ready, we would have skipped
    // scheduling the batch flush task. Schedule it now, if there are messages in the batch container.
    if (isBatchMessagingEnabled() && !batchMessageContainer.isEmpty()) {
        maybeScheduleBatchFlushTask();
    }
    if (pendingRegisteringOp != null) {
        tryRegisterSchema(cnx, pendingRegisteringOp.msg, pendingRegisteringOp.callback, expectedEpoch);
    }
}
3.68
flink_FlinkZooKeeperQuorumPeer_setRequiredProperties
/** Sets required properties to reasonable defaults and logs it. */
private static void setRequiredProperties(Properties zkProps) {
    // Set default client port
    if (zkProps.getProperty("clientPort") == null) {
        zkProps.setProperty("clientPort", String.valueOf(DEFAULT_ZOOKEEPER_CLIENT_PORT));

        LOG.warn("No 'clientPort' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_CLIENT_PORT);
    }

    // Set default init limit
    if (zkProps.getProperty("initLimit") == null) {
        zkProps.setProperty("initLimit", String.valueOf(DEFAULT_ZOOKEEPER_INIT_LIMIT));

        LOG.warn("No 'initLimit' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_INIT_LIMIT);
    }

    // Set default sync limit
    if (zkProps.getProperty("syncLimit") == null) {
        zkProps.setProperty("syncLimit", String.valueOf(DEFAULT_ZOOKEEPER_SYNC_LIMIT));

        LOG.warn("No 'syncLimit' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_SYNC_LIMIT);
    }

    // Set default data dir
    if (zkProps.getProperty("dataDir") == null) {
        String dataDir =
                String.format(
                        "%s/%s/zookeeper",
                        System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());

        zkProps.setProperty("dataDir", dataDir);

        LOG.warn("No 'dataDir' configured. Set to '{}'.", dataDir);
    }

    int peerPort = DEFAULT_ZOOKEEPER_PEER_PORT;
    int leaderPort = DEFAULT_ZOOKEEPER_LEADER_PORT;

    // Set peer and leader ports if none given, because ZooKeeper complains if multiple
    // servers are configured, but no ports are given.
    for (Map.Entry<Object, Object> entry : zkProps.entrySet()) {
        String key = (String) entry.getKey();

        if (entry.getKey().toString().startsWith("server.")) {
            String value = (String) entry.getValue();
            String[] parts = value.split(":");

            if (parts.length == 1) {
                String address = String.format("%s:%d:%d", parts[0], peerPort, leaderPort);
                zkProps.setProperty(key, address);
                LOG.info(
                        "Set peer and leader port of '{}': '{}' => '{}'.", key, value, address);
            } else if (parts.length == 2) {
                String address =
                        String.format(
                                "%s:%d:%d", parts[0], Integer.valueOf(parts[1]), leaderPort);
                zkProps.setProperty(key, address);
                LOG.info("Set peer port of '{}': '{}' => '{}'.", key, value, address);
            }
        }
    }
}
3.68
morf_NamedParameterPreparedStatement_createForQueryOn
/**
 * Create the prepared statement against the specified connection. The prepared statement
 * can only be used to run queries (not updates/inserts etc) and is strictly read only
 * and forward only. For use in bulk data loads.
 *
 * @param connection the connection
 * @return the prepared statement.
 * @throws SQLException if the statement could not be created
 */
public NamedParameterPreparedStatement createForQueryOn(Connection connection) throws SQLException {
  return new NamedParameterPreparedStatement(connection, query, indexMap, true, this);
}
3.68
hmily_SubCoordinator_addSynchronization
/**
 * Add a synchronization.
 *
 * @param synchronization the synchronization
 * @throws RollbackException the rollback exception
 */
public synchronized void addSynchronization(final Synchronization synchronization) throws RollbackException {
    if (state == XaState.STATUS_ACTIVE) {
        synchronizations.add(synchronization);
        return;
    }
    if (state == XaState.STATUS_MARKED_ROLLBACK || state == XaState.STATUS_ROLLEDBACK) {
        synchronizations.add(synchronization);
        throw new RollbackException();
    }
}
3.68
flink_SSLUtils_createInternalClientSSLEngineFactory
/** Creates an SSLEngineFactory to be used by internal communication client endpoints. */
public static SSLHandlerFactory createInternalClientSSLEngineFactory(final Configuration config)
        throws Exception {
    SslContext sslContext = createInternalNettySSLContext(config, true);
    if (sslContext == null) {
        throw new IllegalConfigurationException(
                "SSL is not enabled for internal communication.");
    }

    return new SSLHandlerFactory(
            sslContext,
            config.getInteger(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT),
            config.getInteger(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
}
3.68
hbase_Threads_setLoggingUncaughtExceptionHandler
/**
 * Sets an UncaughtExceptionHandler for the thread which logs the Exception stack if the thread
 * dies.
 */
public static void setLoggingUncaughtExceptionHandler(Thread t) {
  t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER);
}
3.68
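A minimal usage sketch showing where the handler hooks in; the thread name and exception are arbitrary.

class UncaughtHandlerDemo {
    public static void main(String[] args) {
        Thread worker = new Thread(() -> {
            throw new IllegalStateException("boom"); // kills the thread
        }, "example-worker");
        Threads.setLoggingUncaughtExceptionHandler(worker);
        worker.start(); // the handler logs the stack trace when the thread dies
    }
}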
flink_SlotProfile_priorAllocation
/** * Returns a slot profile for the given resource profile, prior allocations and all prior * allocation ids from the whole execution graph. * * @param taskResourceProfile specifying the required resources for the task slot * @param physicalSlotResourceProfile specifying the required resources for the physical slot to * host this task slot * @param preferredLocations specifying the preferred locations * @param priorAllocations specifying the prior allocations * @param reservedAllocations specifying all reserved allocations * @return Slot profile with all the given information */ public static SlotProfile priorAllocation( final ResourceProfile taskResourceProfile, final ResourceProfile physicalSlotResourceProfile, final Collection<TaskManagerLocation> preferredLocations, final Collection<AllocationID> priorAllocations, final Set<AllocationID> reservedAllocations) { return new SlotProfile( taskResourceProfile, physicalSlotResourceProfile, preferredLocations, priorAllocations, reservedAllocations); }
3.68
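A construction sketch, assuming Flink's ResourceProfile.UNKNOWN constant exists as shown; previousAllocationId stands in for an AllocationID recovered from a prior execution.

import java.util.Collections;

class SlotProfileExample {
    static SlotProfile retryOnPreviousSlot(AllocationID previousAllocationId) {
        return SlotProfile.priorAllocation(
            ResourceProfile.UNKNOWN,                         // task slot resources
            ResourceProfile.UNKNOWN,                         // physical slot resources
            Collections.emptyList(),                         // no location preference
            Collections.singletonList(previousAllocationId), // prefer the old slot
            Collections.emptySet());                         // nothing reserved elsewhere
    }
}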
zxing_CaptureActivity_drawResultPoints
/** * Superimpose a line for 1D or dots for 2D to highlight the key features of the barcode. * * @param barcode A bitmap of the captured image. * @param scaleFactor amount by which thumbnail was scaled * @param rawResult The decoded results which contains the points to draw. */ private void drawResultPoints(Bitmap barcode, float scaleFactor, Result rawResult) { ResultPoint[] points = rawResult.getResultPoints(); if (points != null && points.length > 0) { Canvas canvas = new Canvas(barcode); Paint paint = new Paint(); paint.setColor(getResources().getColor(R.color.result_points)); if (points.length == 2) { paint.setStrokeWidth(4.0f); drawLine(canvas, paint, points[0], points[1], scaleFactor); } else if (points.length == 4 && (rawResult.getBarcodeFormat() == BarcodeFormat.UPC_A || rawResult.getBarcodeFormat() == BarcodeFormat.EAN_13)) { // Hacky special case -- draw two lines, for the barcode and metadata drawLine(canvas, paint, points[0], points[1], scaleFactor); drawLine(canvas, paint, points[2], points[3], scaleFactor); } else { paint.setStrokeWidth(10.0f); for (ResultPoint point : points) { if (point != null) { canvas.drawPoint(scaleFactor * point.getX(), scaleFactor * point.getY(), paint); } } } } }
3.68
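The method leans on a drawLine helper that is not shown; a plausible sketch of it follows (an assumption about the real CaptureActivity code, which may differ in detail).

private static void drawLine(Canvas canvas, Paint paint,
                             ResultPoint a, ResultPoint b, float scaleFactor) {
    if (a != null && b != null) {
        // Scale the decoded points to thumbnail coordinates before drawing.
        canvas.drawLine(scaleFactor * a.getX(), scaleFactor * a.getY(),
                        scaleFactor * b.getX(), scaleFactor * b.getY(),
                        paint);
    }
}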
hadoop_UnmanagedApplicationManager_registerApplicationMaster
/**
 * Registers this {@link UnmanagedApplicationManager} with the resource
 * manager.
 *
 * @param request RegisterApplicationMasterRequest
 * @return register response
 * @throws YarnException if the register call fails
 * @throws IOException if the register call fails
 */
public RegisterApplicationMasterResponse registerApplicationMaster(
    RegisterApplicationMasterRequest request) throws YarnException, IOException {
  // Save the register request for re-registering later
  this.registerRequest = request;

  LOG.info("Registering the Unmanaged application master {}", this.applicationId);
  RegisterApplicationMasterResponse response =
      this.rmProxyRelayer.registerApplicationMaster(this.registerRequest);
  this.heartbeatHandler.resetLastResponseId();

  if (LOG.isDebugEnabled()) {
    for (Container container : response.getContainersFromPreviousAttempts()) {
      LOG.debug("RegisterUAM returned existing running container {}", container.getId());
    }
    for (NMToken nmToken : response.getNMTokensFromPreviousAttempts()) {
      LOG.debug("RegisterUAM returned existing NM token for node {}", nmToken.getNodeId());
    }
  }
  LOG.info("RegisterUAM returned {} existing running containers and {} NM tokens",
      response.getContainersFromPreviousAttempts().size(),
      response.getNMTokensFromPreviousAttempts().size());

  // Only start the heartbeat thread once registration has succeeded
  this.heartbeatHandler.setDaemon(true);
  this.heartbeatHandler.start();
  return response;
}
3.68
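A usage sketch; the host, port, and tracking URL are hypothetical placeholders, and uam stands in for an already-constructed UnmanagedApplicationManager.

RegisterApplicationMasterRequest request =
    RegisterApplicationMasterRequest.newInstance("am-host.example.com", 0, "");
RegisterApplicationMasterResponse response = uam.registerApplicationMaster(request);
// On success, the UAM's heartbeat thread is now running in the background.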
pulsar_ProtocolHandlers_protocol
/** * Return the handler for the provided <tt>protocol</tt>. * * @param protocol the protocol to use * @return the protocol handler to handle the provided protocol */ public ProtocolHandler protocol(String protocol) { ProtocolHandlerWithClassLoader h = handlers.get(protocol); if (null == h) { return null; } else { return h.getHandler(); } }
3.68
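Callers must handle the null case themselves; a minimal sketch, where "kafka" is just an example protocol name.

ProtocolHandler handler = protocolHandlers.protocol("kafka");
if (handler == null) {
    throw new IllegalStateException("No handler registered for protocol 'kafka'");
}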
hmily_ConfigLoader_with
/**
 * Creates a new context from the given property sources and the original loader.
 *
 * @param sources  the property sources
 * @param original the original config loader
 * @return the new context
 */
public Context with(final List<PropertyKeySource<?>> sources, final ConfigLoader<Config> original) {
    return new Context(original, sources);
}
3.68
hudi_HoodieIndexUtils_tagRecord
/**
 * Tags the record with an existing location, in place; no new record instance is created.
 */
public static <R> HoodieRecord<R> tagRecord(HoodieRecord<R> record, HoodieRecordLocation location) {
  record.unseal();
  record.setCurrentLocation(location);
  record.seal();
  return record;
}
3.68
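A usage sketch; the instant time and file id are hypothetical values, assuming HoodieRecordLocation's (instantTime, fileId) constructor.

// Hypothetical instant time and file id.
HoodieRecordLocation location = new HoodieRecordLocation("20240101093000", "file-0001");
record = HoodieIndexUtils.tagRecord(record, location); // same instance, now located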
flink_BufferConsumer_copyWithReaderPosition
/**
 * Returns a retained copy with separate indexes and sets the reader position to the given
 * value. This allows reading from the same {@link MemorySegment} twice, starting from the
 * supplied position.
 *
 * @param readerPosition the new reader position. Can be less than the {@link
 *     #currentReaderPosition}, but may not exceed the current writer's position.
 * @return a retained copy of self with separate indexes
 */
public BufferConsumer copyWithReaderPosition(int readerPosition) {
    return new BufferConsumer(
            buffer.retainBuffer(), writerPosition.positionMarker, readerPosition);
}
3.68
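A consumption sketch, assuming BufferConsumer also exposes build() and close(); the copy must be closed to release the reference taken by retainBuffer().

// Take a second, independent read pass over the same MemorySegment from offset 0.
BufferConsumer secondPass = bufferConsumer.copyWithReaderPosition(0);
try {
    Buffer data = secondPass.build(); // consume the copy without moving the original's reader index
    // ... read from data ...
} finally {
    secondPass.close(); // releases the extra reference taken by retainBuffer()
}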
hadoop_SimpleBufferedOutputStream_size
// Returns the number of bytes currently held in the internal buffer.
public int size() {
  return count;
}
3.68
hbase_QuotaUtil_updateClusterQuotaToMachineQuota
/**
 * Converts a cluster-scope quota to a machine-scope quota.
 * @param quotas the original quota
 * @param factor factor used to scale a cluster-wide limit down to a per-machine limit
 * @return the converted quota, whose quota limiters are all in machine scope
 */
private static Quotas updateClusterQuotaToMachineQuota(Quotas quotas, double factor) {
  Quotas.Builder newQuotas = Quotas.newBuilder(quotas);
  if (newQuotas.hasThrottle()) {
    Throttle.Builder throttle = Throttle.newBuilder(newQuotas.getThrottle());
    if (throttle.hasReqNum()) {
      throttle.setReqNum(updateTimedQuota(throttle.getReqNum(), factor));
    }
    if (throttle.hasReqSize()) {
      throttle.setReqSize(updateTimedQuota(throttle.getReqSize(), factor));
    }
    if (throttle.hasReadNum()) {
      throttle.setReadNum(updateTimedQuota(throttle.getReadNum(), factor));
    }
    if (throttle.hasReadSize()) {
      throttle.setReadSize(updateTimedQuota(throttle.getReadSize(), factor));
    }
    if (throttle.hasWriteNum()) {
      throttle.setWriteNum(updateTimedQuota(throttle.getWriteNum(), factor));
    }
    if (throttle.hasWriteSize()) {
      throttle.setWriteSize(updateTimedQuota(throttle.getWriteSize(), factor));
    }
    if (throttle.hasReqCapacityUnit()) {
      throttle.setReqCapacityUnit(updateTimedQuota(throttle.getReqCapacityUnit(), factor));
    }
    if (throttle.hasReadCapacityUnit()) {
      throttle.setReadCapacityUnit(updateTimedQuota(throttle.getReadCapacityUnit(), factor));
    }
    if (throttle.hasWriteCapacityUnit()) {
      throttle.setWriteCapacityUnit(updateTimedQuota(throttle.getWriteCapacityUnit(), factor));
    }
    newQuotas.setThrottle(throttle.build());
  }
  return newQuotas.build();
}
3.68
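The updateTimedQuota helper is not shown; the following is a plausible sketch of it (an assumption, not necessarily HBase's actual implementation): scale the soft limit by the factor and mark the result as machine scope.

private static TimedQuota updateTimedQuota(TimedQuota timedQuota, double factor) {
    if (timedQuota.getScope() == QuotaScope.CLUSTER) {
        TimedQuota.Builder builder = TimedQuota.newBuilder(timedQuota);
        builder.setSoftLimit(Math.max(1, (long) (timedQuota.getSoftLimit() * factor)));
        builder.setScope(QuotaScope.MACHINE);
        return builder.build();
    }
    // Already machine scope; nothing to convert.
    return timedQuota;
}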
hudi_AvroSchemaCompatibility_schemaNameEquals
/** * Tests the equality of two Avro named schemas. * * <p> * Matching includes reader name aliases. * </p> * * @param reader Named reader schema. * @param writer Named writer schema. * @return whether the names of the named schemas match or not. */ public static boolean schemaNameEquals(final Schema reader, final Schema writer) { if (objectsEqual(reader.getName(), writer.getName())) { return true; } // Apply reader aliases: return reader.getAliases().contains(writer.getFullName()); }
3.68
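A worked example using Avro's SchemaBuilder: the reader renames the record but declares the writer's old full name as an alias, so the names are considered to match.

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

class SchemaAliasExample {
    public static void main(String[] args) {
        Schema writer = SchemaBuilder.record("com.example.OldName")
            .fields().requiredString("id").endRecord();
        Schema reader = SchemaBuilder.record("com.example.NewName")
            .aliases("com.example.OldName")
            .fields().requiredString("id").endRecord();

        // true: the reader's alias matches the writer's full name
        System.out.println(AvroSchemaCompatibility.schemaNameEquals(reader, writer));
    }
}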
hadoop_TimedHealthReporterService_setLastReportedTime
/** * Sets the last run time of the node health check. * * @param lastReportedTime last reported time in long */ private synchronized void setLastReportedTime(long lastReportedTime) { this.lastReportedTime = lastReportedTime; }
3.68
framework_ContainerOrderedWrapper_firstItemId
/*
 * Gets the first item stored in the ordered container. Don't add a JavaDoc
 * comment here; we use the default documentation from the implemented
 * interface.
 */
@Override
public Object firstItemId() {
    if (ordered) {
        return ((Container.Ordered) container).firstItemId();
    }
    return first;
}
3.68
hbase_GsonSerializationFeature_bindFactory
/** * Helper method for smoothing over use of {@link SupplierFactoryAdapter}. Inspired by internal * implementation details of jersey itself. */ private <T> ServiceBindingBuilder<T> bindFactory(Supplier<T> supplier) { return bindFactory(new SupplierFactoryAdapter<>(supplier)); }
3.68
hbase_BlockingRpcCallback_get
/** * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was * passed. When used asynchronously, this method will block until the {@link #run(Object)} method * has been called. * @return the response object or {@code null} if no response was passed */ public synchronized R get() throws IOException { while (!resultSet) { try { this.wait(); } catch (InterruptedException ie) { InterruptedIOException exception = new InterruptedIOException(ie.getMessage()); exception.initCause(ie); throw exception; } } return result; }
3.68
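The intended calling pattern, sketched with hypothetical stub and message types: pass the callback to an asynchronous RPC, then block on get().

BlockingRpcCallback<SomeResponse> done = new BlockingRpcCallback<>();
// Hypothetical asynchronous coprocessor call; it will invoke done.run(response)
// from another thread when the RPC completes.
stub.someMethod(controller, request, done);
SomeResponse response = done.get(); // blocks until run() has been called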
flink_MemorySegment_putLongBigEndian
/** * Writes the given long value (64bit, 8 bytes) to the given position in big endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #putLong(int, long)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putLong(int, long)} * is the preferable choice. * * @param index The position at which the value will be written. * @param value The long value to be written. * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the * segment size minus 8. */ public void putLongBigEndian(int index, long value) { if (LITTLE_ENDIAN) { putLong(index, Long.reverseBytes(value)); } else { putLong(index, value); } }
3.68
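A small demo of the guaranteed layout, assuming Flink's MemorySegmentFactory.allocateUnpooledSegment and the single-byte get accessor.

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(8);
segment.putLongBigEndian(0, 0x0102030405060708L);
// Most significant byte first, regardless of the platform's native byte order:
assert segment.get(0) == (byte) 0x01;
assert segment.get(7) == (byte) 0x08;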
hudi_MergeOnReadInputFormat_mayShiftInputSplit
/**
 * Skips the input split forward by the number of records it has already consumed.
 *
 * <p>Note: This action is time-consuming.
 */
private void mayShiftInputSplit(MergeOnReadInputSplit split) throws IOException {
    if (split.isConsumed()) {
        // If the input split was partially consumed before, skip over the
        // already-consumed records first.
        for (long i = 0; i < split.getConsumed() && !reachedEnd(); i++) {
            nextRecord(null);
        }
    }
}
3.68