name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (3.26 to 3.68)
streampipes_AbstractConfigurablePipelineElementBuilder_requiredParameterAsCollection
/** * @param label A human-readable label that describes the required static property. * @param staticProperty The static property used as the template for each member of the collection. * @return the builder instance */ public K requiredParameterAsCollection(Label label, StaticProperty staticProperty) { CollectionStaticProperty collection = prepareStaticProperty(label, new CollectionStaticProperty()); collection.setStaticPropertyTemplate(staticProperty); this.staticProperties.add(collection); return me(); }
3.68
flink_TGetQueryIdReq_findByThriftIdOrThrow
/** * Find the _Fields constant that matches fieldId, throwing an exception if it is not found. */ public static _Fields findByThriftIdOrThrow(int fieldId) { _Fields fields = findByThriftId(fieldId); if (fields == null) throw new java.lang.IllegalArgumentException( "Field " + fieldId + " doesn't exist!"); return fields; }
3.68
flink_FlinkMatchers_containsCause
/** Checks for a {@link Throwable} that matches by class and message. */ public static Matcher<Throwable> containsCause(Throwable failureCause) { return new ContainsCauseAndMessageMatcher(failureCause); }
3.68
hibernate-validator_JavaBeanExecutable_recomputeParameterAnnotationsForJDK8303112
/** * This is a workaround for <a href="https://bugs.openjdk.org/browse/JDK-8303112">JDK-8303112</a>. * @param parameters The result of calling {@link Executable#getParameters()} * @param parameterAnnotationsArray The result of calling {@link Executable#getParameterAnnotations()} * @return A fixed version of {@code parameterAnnotationsArray}, * or {@code null} if {@code parameterAnnotationsArray} is fine and unaffected by JDK-8303112. */ private static Annotation[][] recomputeParameterAnnotationsForJDK8303112(Parameter[] parameters, Annotation[][] parameterAnnotationsArray) { int parameterCount = parameters.length; if ( parameterAnnotationsArray.length == parameterCount ) { // Not affected by JDK-8303112 return null; } // We're in a situation where parameter.getAnnotation()/parameter.getAnnotations() // is buggy when there are implicit/synthetic parameters, // because constructor.getParameterAnnotations() (wrongly) ignores implicit/synthetic parameters // while parameter.getAnnotations() (rightly) assumes they are present in the array. Annotation[][] annotationsForJDK8303112; annotationsForJDK8303112 = new Annotation[parameterCount][]; int nonImplicitNorSyntheticParamIndex = 0; for ( int i = 0; i < parameterCount; i++ ) { Parameter parameter = parameters[i]; if ( parameter.isImplicit() || parameter.isSynthetic() ) { annotationsForJDK8303112[i] = new Annotation[0]; } else if ( nonImplicitNorSyntheticParamIndex < parameterAnnotationsArray.length ) { annotationsForJDK8303112[i] = parameterAnnotationsArray[nonImplicitNorSyntheticParamIndex]; ++nonImplicitNorSyntheticParamIndex; } else { // Something is wrong; most likely the class wasn't compiled with -parameters // and so isImplicit/isSynthetic always return false. // As a last resort, assume the implicit/synthetic parameters are the first ones. nonImplicitNorSyntheticParamIndex = parameterCount - parameterAnnotationsArray.length; Arrays.fill( annotationsForJDK8303112, 0, nonImplicitNorSyntheticParamIndex, new Annotation[0] ); System.arraycopy( parameterAnnotationsArray, 0, annotationsForJDK8303112, nonImplicitNorSyntheticParamIndex, parameterAnnotationsArray.length ); return annotationsForJDK8303112; } } return annotationsForJDK8303112; }
3.68
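For context, a minimal, hypothetical sketch of the mismatch the snippet above detects: on a JDK affected by JDK-8303112, an inner-class constructor can report more parameters than annotation slots, which is exactly the length comparison the method performs before rebuilding the array. The class and annotation choice here are illustrative only.

import java.lang.annotation.Annotation;
import java.lang.reflect.Constructor;
import java.lang.reflect.Parameter;

public class Jdk8303112Sketch {
    class Inner {
        // The inner-class constructor also receives an implicit outer-instance parameter.
        Inner(@Deprecated String name) {}
    }

    public static void main(String[] args) {
        Constructor<?> ctor = Inner.class.getDeclaredConstructors()[0];
        Parameter[] params = ctor.getParameters();            // includes the implicit parameter
        Annotation[][] anns = ctor.getParameterAnnotations(); // may omit it on affected JDKs
        // When the two lengths differ, recomputeParameterAnnotationsForJDK8303112 rebuilds the array.
        System.out.println(params.length + " parameters vs " + anns.length + " annotation slots");
    }
}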
hbase_TraceUtil_tracedRunnable
/** * Wrap the provided {@code runnable} in a {@link Runnable} that is traced. */ public static Runnable tracedRunnable(final Runnable runnable, final Supplier<Span> spanSupplier) { // N.B. This method name follows the convention of this class, i.e., tracedFuture, rather than // the convention of the OpenTelemetry classes, i.e., Context#wrap. return () -> { final Span span = spanSupplier.get(); try (final Scope ignored = span.makeCurrent()) { runnable.run(); span.setStatus(StatusCode.OK); } finally { span.end(); } }; }
3.68
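A sketch of how the wrapper above might be used with the OpenTelemetry API; TraceUtil is the HBase helper shown in the snippet, while the executor, tracer and span name are assumptions for illustration.

import io.opentelemetry.api.trace.Tracer;
import java.util.concurrent.ExecutorService;

// Submits work that runs inside a freshly started span; the span is created lazily by the supplier.
static void submitTraced(ExecutorService executor, Tracer tracer, Runnable work) {
    executor.submit(TraceUtil.tracedRunnable(work, () -> tracer.spanBuilder("work").startSpan()));
}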
hbase_SnapshotDescriptionUtils_getMaxMasterTimeout
/** * @param conf {@link Configuration} from which to check for the timeout * @param type type of snapshot being taken * @param defaultMaxWaitTime Default amount of time to wait, if none is in the configuration * @return the max amount of time the master should wait for a snapshot to complete */ public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type, long defaultMaxWaitTime) { String confKey; switch (type) { case DISABLED: default: confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; } return Math.max(conf.getLong(confKey, defaultMaxWaitTime), conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime)); }
3.68
framework_Navigator_getCurrentView
/** * Return the currently active view. * * @since 7.6 * @return current view */ public View getCurrentView() { return currentView; }
3.68
hadoop_MoveStep_getTolerancePercent
/** * Tolerance Percentage indicates when a move operation is considered good * enough. This is a percentage of deviation from ideal that is considered * fine. * * For example: if the ideal amount on each disk was 1 TB and the * tolerance was 10%, then getting to 900 GB on the destination disk is * considered good enough. * * @return tolerance percentage. */ @Override public long getTolerancePercent() { return tolerancePercent; }
3.68
hadoop_JobMetaData_setContainerEnd
/** * Add container release time. * * @param containerId id of the container. * @param time container release time. * @return the reference to current {@link JobMetaData}. */ public final JobMetaData setContainerEnd(final String containerId, final long time) { if (rawEnd.put(containerId, time) != null) { LOGGER.warn("find duplicate container release time for {}, so we replace" + " it with {}.", containerId, time); } return this; }
3.68
framework_WeekGrid_getDateCellIndex
/** * @param dateCell the date cell to look up * @return the index of the given date cell in this week, starting from * 0 */ public int getDateCellIndex(DateCell dateCell) { return content.getWidgetIndex(dateCell) - 1; }
3.68
hadoop_S3ACommitterFactory_chooseCommitterFactory
/** * Choose a committer from the FS and task configurations. Task Configuration * takes priority, allowing execution engines to dynamically change * committer on a query-by-query basis. * @param fileSystem FS * @param outputPath destination path * @param taskConf configuration from the task * @return An S3A committer if chosen, or "null" for the classic value * @throws PathCommitException on a failure to identify the committer */ private AbstractS3ACommitterFactory chooseCommitterFactory( S3AFileSystem fileSystem, Path outputPath, Configuration taskConf) throws PathCommitException { AbstractS3ACommitterFactory factory; // the FS conf will have had its per-bucket values resolved, unlike // job/task configurations. Configuration fsConf = fileSystem.getConf(); String name = fsConf.getTrimmed(FS_S3A_COMMITTER_NAME, COMMITTER_NAME_FILE); name = taskConf.getTrimmed(FS_S3A_COMMITTER_NAME, name); LOG.debug("Committer option is {}", name); switch (name) { case COMMITTER_NAME_FILE: factory = null; break; case COMMITTER_NAME_DIRECTORY: factory = new DirectoryStagingCommitterFactory(); break; case COMMITTER_NAME_PARTITIONED: factory = new PartitionedStagingCommitterFactory(); break; case COMMITTER_NAME_MAGIC: factory = new MagicS3GuardCommitterFactory(); break; case InternalCommitterConstants.COMMITTER_NAME_STAGING: factory = new StagingCommitterFactory(); break; default: throw new PathCommitException(outputPath, "Unknown committer: \"" + name + "\""); } return factory; }
3.68
rocketmq-connect_ColumnParser_getColumnParser
/** * This implementation is not complete yet. * @param dataType the column data type name * @param colType the full column type definition * @param charset the character set, used for string columns * @return a ColumnParser matching the given data type */ public static ColumnParser getColumnParser(String dataType, String colType, String charset) { switch (dataType) { case "tinyint": case "smallint": case "mediumint": case "varint": case "int": return new IntColumnParser(dataType, colType); case "bigint": return new BigIntColumnParser(colType); case "tinytext": case "text": case "mediumtext": case "longtext": case "varchar": case "ascii": case "char": return new StringColumnParser(charset); case "date": case "datetime": case "timestamp": return new DateTimeColumnParser(); case "time": return new TimeColumnParser(); case "year": return new YearColumnParser(); case "enum": return new EnumColumnParser(colType); case "set": return new SetColumnParser(colType); case "boolean": return new BooleanColumnParser(); default: return new DefaultColumnParser(); } }
3.68
hbase_RegionPlan_getSource
/** * Get the source server for the plan for this region. * @return server info for source */ public ServerName getSource() { return source; }
3.68
hudi_RelationalDBBasedStorage_saveInstantMetadata
// todo: check correctness @Override public void saveInstantMetadata(long tableId, THoodieInstant instant, byte[] metadata) throws MetaserverStorageException { InstantBean instantBean = new InstantBean(tableId, instant); Map<String, Object> params = new HashMap<>(); params.put("instant", instantBean); params.put("metadata", metadata); // todo: array bytes to longblob timelineDao.insertBySql("insertInstantMetadata", params); }
3.68
querydsl_SQLExpressions_dateadd
/** * Create a dateadd(unit, date, amount) expression * * @param unit date part * @param date date * @param amount amount * @return converted date */ public static <D extends Comparable> DateExpression<D> dateadd(DatePart unit, DateExpression<D> date, int amount) { return Expressions.dateOperation(date.getType(), DATE_ADD_OPS.get(unit), date, ConstantImpl.create(amount)); }
3.68
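A small usage sketch of the dateadd helper above, assuming Querydsl's Expressions.datePath factory and the lowercase DatePart constants; the column name is illustrative.

import com.querydsl.core.types.dsl.DateExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.sql.DatePart;
import com.querydsl.sql.SQLExpressions;
import java.sql.Date;

// Builds an expression that adds seven days to the due_date column.
DateExpression<Date> dueDate = Expressions.datePath(Date.class, "due_date");
DateExpression<Date> oneWeekLater = SQLExpressions.dateadd(DatePart.day, dueDate, 7);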
pulsar_ConsumerConfiguration_setProperties
/** * Add all the properties in the provided map. * * @param properties the properties to add * @return the ConsumerConfiguration instance */ public ConsumerConfiguration setProperties(Map<String, String> properties) { conf.getProperties().putAll(properties); return this; }
3.68
pulsar_ClientConfiguration_isUseTls
/** * @return whether TLS encryption is used on the connection */ public boolean isUseTls() { return confData.isUseTls(); }
3.68
hudi_SecondaryIndexUtils_getSecondaryIndexes
/** * Get secondary index metadata for this table * * @param metaClient HoodieTableMetaClient * @return HoodieSecondaryIndex List */ public static Option<List<HoodieSecondaryIndex>> getSecondaryIndexes(HoodieTableMetaClient metaClient) { Option<String> indexesMetadata = metaClient.getTableConfig().getSecondaryIndexesMetadata(); return indexesMetadata.map(SecondaryIndexUtils::fromJsonString); }
3.68
morf_DeepCopyTransformations_deepCopy
/** * @see org.alfasoftware.morf.util.DeepCopyTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyableWithTransformation) */ @Override public <T> T deepCopy(DeepCopyableWithTransformation<T,? extends Builder<T>> element) { return element == null ? null : element.deepCopy(this).build(); }
3.68
hbase_CallRunner_cleanup
/** * Cleanup after ourselves... let go of references. */ private void cleanup() { this.call.cleanup(); this.call = null; this.rpcServer = null; }
3.68
shardingsphere-elasticjob_JobConfiguration_newBuilder
/** * Create ElasticJob configuration builder. * * @param jobName job name * @param shardingTotalCount sharding total count * @return ElasticJob configuration builder */ public static Builder newBuilder(final String jobName, final int shardingTotalCount) { return new Builder(jobName, shardingTotalCount); }
3.68
hbase_ServerCommandLine_usage
/** * Print usage information for this command line. * @param message if not null, print this message before the usage info. */ protected void usage(String message) { if (message != null) { System.err.println(message); System.err.println(""); } System.err.println(getUsage()); }
3.68
flink_TypeExtractionUtils_extractTypeArgument
/** * This method extracts the n-th type argument from the given type. An InvalidTypesException is * thrown if the type does not have any type arguments or if the index exceeds the number of * type arguments. * * @param t Type to extract the type arguments from * @param index Index of the type argument to extract * @return The extracted type argument * @throws InvalidTypesException if the given type does not have any type arguments or if the * index exceeds the number of type arguments. */ public static Type extractTypeArgument(Type t, int index) throws InvalidTypesException { if (t instanceof ParameterizedType) { Type[] actualTypeArguments = ((ParameterizedType) t).getActualTypeArguments(); if (index < 0 || index >= actualTypeArguments.length) { throw new InvalidTypesException( "Cannot extract the type argument with index " + index + " because the type has only " + actualTypeArguments.length + " type arguments."); } else { return actualTypeArguments[index]; } } else { throw new InvalidTypesException( "The given type " + t + " is not a parameterized type."); } }
3.68
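A self-contained sketch of calling the helper above on a reflected generic type; the class and field names are made up for illustration, and TypeExtractionUtils is assumed to be Flink's org.apache.flink.api.java.typeutils.TypeExtractionUtils.

import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;
import org.apache.flink.api.java.typeutils.TypeExtractionUtils;

public class TypeArgumentDemo {
    Map<String, List<Integer>> index; // generic type to inspect

    public static void main(String[] args) throws Exception {
        Type mapType = TypeArgumentDemo.class.getDeclaredField("index").getGenericType();
        // Index 1 selects the map's value type, i.e. java.util.List<java.lang.Integer>.
        Type valueType = TypeExtractionUtils.extractTypeArgument(mapType, 1);
        System.out.println(valueType);
    }
}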
morf_DataType_hasWidth
/** * @return Whether this DataType has a variable width */ public boolean hasWidth() { return hasWidth; }
3.68
framework_TabSheet_writeTabToDesign
/** * Writes the given tab to design * * @since 7.4 * @param design * the design node for tabsheet * @param designContext * the design context * @param tab * the tab to be written */ private void writeTabToDesign(Element design, DesignContext designContext, Tab tab) { // get default tab instance Tab def = new TabSheetTabImpl(null, null, null); // create element for tab Element tabElement = design.appendElement("tab"); // add tab content tabElement.appendChild(designContext.createElement(tab.getComponent())); Attributes attr = tabElement.attributes(); // write attributes DesignAttributeHandler.writeAttribute("visible", attr, tab.isVisible(), def.isVisible(), Boolean.class, designContext); DesignAttributeHandler.writeAttribute("closable", attr, tab.isClosable(), def.isClosable(), Boolean.class, designContext); DesignAttributeHandler.writeAttribute("caption", attr, tab.getCaption(), def.getCaption(), String.class, designContext); DesignAttributeHandler.writeAttribute("enabled", attr, tab.isEnabled(), def.isEnabled(), Boolean.class, designContext); DesignAttributeHandler.writeAttribute("icon", attr, tab.getIcon(), def.getIcon(), Resource.class, designContext); DesignAttributeHandler.writeAttribute("icon-alt", attr, tab.getIconAlternateText(), def.getIconAlternateText(), String.class, designContext); DesignAttributeHandler.writeAttribute("description", attr, tab.getDescription(), def.getDescription(), String.class, designContext); DesignAttributeHandler.writeAttribute("style-name", attr, tab.getStyleName(), def.getStyleName(), String.class, designContext); DesignAttributeHandler.writeAttribute("id", attr, tab.getId(), def.getId(), String.class, designContext); if (getSelectedTab() != null && getSelectedTab().equals(tab.getComponent())) { // use write attribute to get consistent handling for boolean DesignAttributeHandler.writeAttribute("selected", attr, true, false, boolean.class, designContext); } }
3.68
hbase_CellModel_setColumn
/** * @param column the column to set */ public void setColumn(byte[] column) { this.column = column; }
3.68
morf_OracleDialect_getSqlFrom
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlFrom(org.alfasoftware.morf.sql.ExceptSetOperator) */ @Override protected String getSqlFrom(ExceptSetOperator operator) { return String.format(" MINUS %s", // MINUS has been supported by Oracle for a long time and the EXCEPT support was added in 21c getSqlFrom(operator.getSelectStatement())); }
3.68
hadoop_CSQueueStore_remove
/** * Method for removing a queue from the store by name. * @param name A deterministic name for the queue to be removed */ public void remove(String name) { CSQueue queue = get(name); if (queue != null) { remove(queue); } }
3.68
dubbo_AbortPolicyWithReport_dispatchThreadPoolExhaustedEvent
/** * Dispatch a ThreadPoolExhaustedEvent to all registered listeners. * * @param msg the message describing the exhausted thread pool */ public void dispatchThreadPoolExhaustedEvent(String msg) { listeners.forEach(listener -> listener.onEvent(new ThreadPoolExhaustedEvent(msg))); }
3.68
framework_TabSheetElement_getTabCaption
/** * Returns the caption text of the given tab. If the tab has no caption, * returns null. * * @param tabCell * A web element representing a tab, as given by * findElements(byTabCell).get(index). * @return The caption of tabCell or null if tabCell has no caption. */ private String getTabCaption(WebElement tabCell) { List<WebElement> captionElements = tabCell.findElements(byCaption); if (captionElements.isEmpty()) { return null; } else { return captionElements.get(0).getText(); } }
3.68
hbase_ZKProcedureCoordinator_getZkProcedureUtil
/** * Used in testing */ final ZKProcedureUtil getZkProcedureUtil() { return zkProc; }
3.68
querydsl_BooleanBuilder_orAllOf
/** * Create the union of this and the intersection of the given args * {@code (this || (arg1 && arg2 ... && argN))} * * @param args intersection of predicates * @return the current object */ public BooleanBuilder orAllOf(Predicate... args) { if (args.length > 0) { or(ExpressionUtils.allOf(args)); } return this; }
3.68
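A brief usage sketch, assuming Querydsl's Expressions.booleanPath factory; the path names are illustrative. The builder below ends up representing a || (b && c).

import com.querydsl.core.BooleanBuilder;
import com.querydsl.core.types.dsl.BooleanPath;
import com.querydsl.core.types.dsl.Expressions;

BooleanPath a = Expressions.booleanPath("a");
BooleanPath b = Expressions.booleanPath("b");
BooleanPath c = Expressions.booleanPath("c");
BooleanBuilder builder = new BooleanBuilder(a);
builder.orAllOf(b, c); // now equivalent to a || (b && c)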
hbase_FilterBase_toString
/** * Return filter's info for debugging and logging purpose. */ @Override public String toString() { return this.getClass().getSimpleName(); }
3.68
graphhopper_Snap_calcSnappedPoint
/** * Calculates the closest point on the edge from the query point. If too close to a tower or pillar node this method * might change the snappedPosition and wayIndex. */ public void calcSnappedPoint(DistanceCalc distCalc) { if (closestEdge == null) throw new IllegalStateException("No closest edge?"); if (snappedPoint != null) throw new IllegalStateException("Calculate snapped point only once"); PointList fullPL = getClosestEdge().fetchWayGeometry(FetchMode.ALL); double tmpLat = fullPL.getLat(wayIndex); double tmpLon = fullPL.getLon(wayIndex); double tmpEle = fullPL.getEle(wayIndex); if (snappedPosition != Position.EDGE) { snappedPoint = new GHPoint3D(tmpLat, tmpLon, tmpEle); return; } double queryLat = getQueryPoint().lat, queryLon = getQueryPoint().lon; double adjLat = fullPL.getLat(wayIndex + 1), adjLon = fullPL.getLon(wayIndex + 1); if (distCalc.validEdgeDistance(queryLat, queryLon, tmpLat, tmpLon, adjLat, adjLon)) { GHPoint crossingPoint = distCalc.calcCrossingPointToEdge(queryLat, queryLon, tmpLat, tmpLon, adjLat, adjLon); double adjEle = fullPL.getEle(wayIndex + 1); // We want to prevent extra virtual nodes and very short virtual edges in case the snap/crossing point is // very close to a tower node. Since we delayed the calculation of the crossing point until here, we need // to correct the Snap.Position in these cases. Note that it is possible that the query point is very far // from the tower node, but the crossing point is still very close to it. if (considerEqual(crossingPoint.lat, crossingPoint.lon, tmpLat, tmpLon)) { snappedPosition = wayIndex == 0 ? Position.TOWER : Position.PILLAR; snappedPoint = new GHPoint3D(tmpLat, tmpLon, tmpEle); } else if (considerEqual(crossingPoint.lat, crossingPoint.lon, adjLat, adjLon)) { wayIndex++; snappedPosition = wayIndex == fullPL.size() - 1 ? Position.TOWER : Position.PILLAR; snappedPoint = new GHPoint3D(adjLat, adjLon, adjEle); } else { snappedPoint = new GHPoint3D(crossingPoint.lat, crossingPoint.lon, (tmpEle + adjEle) / 2); } } else { // outside of edge segment [wayIndex, wayIndex+1] should not happen for EDGE assert false : "incorrect pos: " + snappedPosition + " for " + snappedPoint + ", " + fullPL + ", " + wayIndex; } }
3.68
morf_SqlDialect_getSqlFromInsert
/** * Convert an {@link InsertStatement} into standards compliant SQL. * <p> * For example, the following code: * </p> * <blockquote> * * <pre> * InsertStatement stmt = new InsertStatement().into(new Table(&quot;agreement&quot;)).from(new Table(&quot;agreement&quot;)); * String result = dialect * .getSqlFrom(stmt); * </pre> * * </blockquote> * <p> * Will populate {@code result} with: * </p> * <blockquote> * * <pre> * INSERT INTO agreement (id, version, ...) SELECT id, version, ... FROM agreement * </pre> * * </blockquote> * * @param stmt the insert statement to generate SQL for * @param metadata the database schema. * @param idTable the ID Table. * @return a standards compliant SQL INSERT statement */ protected List<String> getSqlFromInsert(InsertStatement stmt, Schema metadata, Table idTable) { if (stmt.getTable() == null) { throw new IllegalArgumentException("Cannot specify a null destination table in an insert statement"); } if (stmt.getSelectStatement() == null) { throw new IllegalArgumentException("Cannot specify a null for the source select statement in getSqlFrom"); } SelectStatement sourceStatement = stmt.getSelectStatement(); List<String> result = new LinkedList<>(); StringBuilder stringBuilder = new StringBuilder(); stringBuilder.append(getSqlForInsertInto(stmt)); stringBuilder.append(tableNameWithSchemaName(stmt.getTable())); stringBuilder.append(" "); // Add the destination fields if (!stmt.getFields().isEmpty()) { // Only check the field count if we're operating with full knowledge of // the schema. // If we're not, then frankly the code concerned should know what it's // doing (e.g. // using DB auto-increment columns or allowing fields to self-default) if (metadata != null && stmt.getFields().size() != sourceStatement.getFields().size()) { throw new IllegalArgumentException(String.format( "Insert statement and source select statement must use the same number of columns. Insert has [%d] but select has [%d].", stmt.getFields().size(), sourceStatement.getFields().size())); } // Use the fields specified by the caller stringBuilder.append("("); boolean firstRun = true; boolean explicitIdColumn = false; boolean explicitVersionColumn = false; for (AliasedField currentField : stmt.getFields()) { if (!(currentField instanceof FieldReference)) { throw new IllegalArgumentException("Cannot use a non-field reference in the fields section of an insert statement: [" + currentField.getAlias() + "]"); } FieldReference fieldRef = (FieldReference) currentField; if (!firstRun) { stringBuilder.append(", "); } stringBuilder.append(fieldRef.getName()); // Track if we have an id column (i.e. we don't need to default one in) explicitIdColumn |= fieldRef.getName().equalsIgnoreCase("id"); // Track if we have a version column (i.e. we don't need to default one // in) explicitVersionColumn |= fieldRef.getName().equalsIgnoreCase("version"); firstRun = false; } // Only augment the statement if we have the schema to work from if (metadata != null && idTable != null) { if (!explicitIdColumn && hasColumnNamed(stmt.getTable().getName(), metadata, "id")) { result.addAll(buildSimpleAutonumberUpdate(stmt.getTable(), "id", idTable, ID_INCREMENTOR_TABLE_COLUMN_NAME, ID_INCREMENTOR_TABLE_COLUMN_VALUE)); AliasedField idValue = nextIdValue(stmt.getTable(), stmt.getSelectStatement().getTable(), idTable, ID_INCREMENTOR_TABLE_COLUMN_NAME, ID_INCREMENTOR_TABLE_COLUMN_VALUE); stringBuilder.append(", id"); // Augment the select statement sourceStatement = sourceStatement.shallowCopy().fields(idValue).build(); } if (!explicitVersionColumn && hasColumnNamed(stmt.getTable().getName(), metadata, "version")) { stringBuilder.append(", version"); // Augment the select statement sourceStatement = sourceStatement.shallowCopy().fields(SqlUtils.literal(0).as("version")).build(); } } stringBuilder.append(") "); } // Add the select statement stringBuilder.append(getSqlFrom(sourceStatement)); result.add(stringBuilder.toString()); return result; }
3.68
framework_AbstractBeanContainer_addBean
/** * Adds a bean to the container using the bean item id resolver to find its * identifier. * * A bean id resolver must be set before calling this method. * * @see #addItem(Object, Object) * * @param bean * the bean to add * @return BeanItem<BEANTYPE> item added or null * @throws IllegalStateException * if no bean identifier resolver has been set * @throws IllegalArgumentException * if an identifier cannot be resolved for the bean */ protected BeanItem<BEANTYPE> addBean(BEANTYPE bean) throws IllegalStateException, IllegalArgumentException { if (bean == null) { return null; } IDTYPE itemId = resolveBeanId(bean); if (itemId == null) { throw new IllegalArgumentException( "Resolved identifier for a bean must not be null"); } return addItem(itemId, bean); }
3.68
morf_Criterion_greaterThanOrEqualTo
/** * Helper method to create a new "GREATER THAN OR EQUAL TO" expression. * * <blockquote><pre> * Criterion.greaterThanOrEqualTo(new Field("startdate"), 20091001);</pre></blockquote> * * @param field the field to evaluate in the expression (the left hand side of the expression) * @param value the value to evaluate in the expression (the right hand side) * @return a new Criterion object */ public static Criterion greaterThanOrEqualTo(AliasedField field, Object value) { return new Criterion(Operator.GTE, field, value); }
3.68
flink_EncodingUtils_toDigit
/** * Converts a hexadecimal character to an integer. * * <p>Copied from * https://github.com/apache/commons-codec/blob/master/src/main/java/org/apache/commons/codec/binary/Hex.java. * * @param ch A character to convert to an integer digit * @param idx The index of the character in the source * @return An integer * @throws TableException Thrown if ch is an illegal hex character */ private static int toDigit(final char ch, final int idx) throws TableException { final int digit = Character.digit(ch, 16); if (digit == -1) { throw new TableException( "Illegal hexadecimal character: [" + ch + "] at index: [" + idx + "]"); } return digit; }
3.68
flink_IterationAggregatorBroker_instance
/** Retrieve singleton instance. */ public static IterationAggregatorBroker instance() { return INSTANCE; }
3.68
hadoop_ProbeStatus_fail
/** * A probe has failed either because the test returned false, or an exception * was thrown. The {@link #success} field is set to false, any exception * thrown is recorded. * @param probe probe that failed * @param thrown an exception that was thrown. */ public void fail(Probe probe, Throwable thrown) { finish(probe, false, "Failure in " + probe, thrown); }
3.68
shardingsphere-elasticjob_JobRegistry_shutdown
/** * Shutdown job schedule. * * @param jobName job name */ public void shutdown(final String jobName) { Optional.ofNullable(schedulerMap.remove(jobName)).ifPresent(JobScheduleController::shutdown); Optional.ofNullable(regCenterMap.remove(jobName)).ifPresent(regCenter -> regCenter.evictCacheData("/" + jobName)); ListenerNotifierManager.getInstance().removeJobNotifyExecutor(jobName); jobInstanceMap.remove(jobName); jobRunningMap.remove(jobName); currentShardingTotalCountMap.remove(jobName); }
3.68
hadoop_StateStoreSerializer_newRecord
/** * Create a new record. * * @param clazz Class of the new record. * @param <T> Type of the record. * @return New record. */ public static <T> T newRecord(Class<T> clazz) { return getSerializer(null).newRecordInstance(clazz); }
3.68
pulsar_OffloadPoliciesImpl_getCompatibleValue
/** * Make configurations of the OffloadPolicies compatible with the config file. * * <p>The names of the fields {@link OffloadPoliciesImpl#managedLedgerOffloadDeletionLagInMillis} * and {@link OffloadPoliciesImpl#managedLedgerOffloadThresholdInBytes} do not match the * config file (broker.conf or standalone.conf). * * @param properties broker configuration properties * @param field the field to read * @return field value */ private static Object getCompatibleValue(Properties properties, Field field) { Object object; if (field.getName().equals("managedLedgerOffloadThresholdInBytes")) { object = properties.getProperty("managedLedgerOffloadThresholdInBytes", properties.getProperty(OFFLOAD_THRESHOLD_NAME_IN_CONF_FILE)); } else if (field.getName().equals("managedLedgerOffloadDeletionLagInMillis")) { object = properties.getProperty("managedLedgerOffloadDeletionLagInMillis", properties.getProperty(DELETION_LAG_NAME_IN_CONF_FILE)); } else if (field.getName().equals("managedLedgerOffloadedReadPriority")) { object = properties.getProperty("managedLedgerOffloadedReadPriority", properties.getProperty(DATA_READ_PRIORITY_NAME_IN_CONF_FILE)); } else { object = properties.get(field.getName()); } return value((String) object, field); }
3.68
hbase_DNS_getHostname
/** * Get the configured hostname for a given ServerType. Gets the default hostname if not specified * in the configuration. * @param conf Configuration to look up. * @param serverType ServerType to look up in the configuration for overrides. */ public static String getHostname(@NonNull Configuration conf, @NonNull ServerType serverType) throws UnknownHostException { String hostname; switch (serverType) { case MASTER: hostname = conf.get(MASTER_HOSTNAME_KEY); break; case REGIONSERVER: hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); break; default: hostname = null; } if (hostname == null || hostname.isEmpty()) { return Strings.domainNamePointerToHostName( getDefaultHost(conf.get("hbase." + serverType.getName() + ".dns.interface", "default"), conf.get("hbase." + serverType.getName() + ".dns.nameserver", "default"))); } else { return hostname; } }
3.68
hbase_ColumnCount_increment
/** * Increment the current version count * @return current count */ public int increment() { return ++count; }
3.68
AreaShop_RegionSign_getFacing
/** * Get the facing of the sign as saved in the config. * @return BlockFace the sign faces, or null if unknown */ public BlockFace getFacing() { try { return BlockFace.valueOf(getRegion().getConfig().getString("general.signs." + key + ".facing")); } catch(NullPointerException | IllegalArgumentException e) { return null; } }
3.68
framework_GridKeyUpEvent_isLeftArrow
/** * Is this a left arrow? * * @return whether this is a left arrow key event */ public boolean isLeftArrow() { return getNativeKeyCode() == KeyCodes.KEY_LEFT; }
3.68
hadoop_ResourceUsageMetrics_size
/** * Returns the size of the serialized data */ public int size() { int size = 0; size += WritableUtils.getVIntSize(cumulativeCpuUsage); // long #1 size += WritableUtils.getVIntSize(virtualMemoryUsage); // long #2 size += WritableUtils.getVIntSize(physicalMemoryUsage); // long #3 size += WritableUtils.getVIntSize(heapUsage); // long #4 return size; }
3.68
hadoop_OBSDataBlocks_close
/** * After the stream is closed, set the local reference to the byte buffer * to null; this guarantees that future attempts to use stream methods * will fail. */ @Override public synchronized void close() { LOG.debug("ByteBufferInputStream.close() for {}", ByteBufferBlock.super.toString()); byteBuffer = null; }
3.68
streampipes_GuessSchemaBuilder_create
/** * Creates a new guess schema object using the builder pattern. */ public static GuessSchemaBuilder create() { return new GuessSchemaBuilder(); }
3.68
flink_DeclarationRewriter_extractLocalVariable
/** @return new name. */ private String extractLocalVariable( JavaParser.VariableDeclaratorIdContext decId, JavaParser.TypeTypeContext typeType, boolean forceNewName) { String name = decId.getText(); if (forceNewName || allVarNames.contains(name)) { // here we assume that the original code can be successfully compiled. // that is to say, the scope of two variables with the same name will not // overlap. String newName = CodeSplitUtil.newName("local"); replaceMap.put(name, newName); newLocalVariables .append(typeType.getText()) .append(" ") .append(newName) .append(";\n"); return newName; } else { newLocalVariables .append(typeType.getText()) .append(" ") .append(name) .append(";\n"); allVarNames.add(name); return name; } }
3.68
hadoop_JobTokenSecretManager_addTokenForJob
/** * Add the job token of a job to cache * @param jobId the job that owns the token * @param token the job token */ public void addTokenForJob(String jobId, Token<JobTokenIdentifier> token) { SecretKey tokenSecret = createSecretKey(token.getPassword()); synchronized (currentJobTokens) { currentJobTokens.put(jobId, tokenSecret); } }
3.68
pulsar_PulsarMockBookKeeper_returnEmptyLedgerAfter
/** * After N steps, make a ledger appear to be empty. */ public synchronized void returnEmptyLedgerAfter(int steps) { emptyLedgerAfter = steps; }
3.68
framework_VAccordion_close
/** * Closes this stack item and removes the wrapped widget from the DOM * tree and this stack item. */ public void close() { if (widget != null) { remove(widget); } content.getStyle().setVisibility(Visibility.HIDDEN); content.getStyle().setTop(-100000, Unit.PX); content.getStyle().setLeft(-100000, Unit.PX); removeStyleDependentName("open"); setHeight(-1); setWidth(""); open = false; getElement().setTabIndex(-1); }
3.68
framework_ComboBox_getLastItemIndexOnCurrentPage
/** * Returns the index of the last item on the current page. The index is to * the underlying (possibly filtered) contents. If needNullSelectOption is * true, the null item takes up the first slot on the first page, * effectively reducing the first page size by one. * * @param needNullSelectOption * true if a null option should be shown before any other options * (takes up the first slot on the first page, not counted in * index) * @param size * number of items after filtering (not including the null item, * if any) * @param first * index in the filtered view of the first item of the page * @return index in the filtered view of the last item on the page */ private int getLastItemIndexOnCurrentPage(boolean needNullSelectOption, int size, int first) { // page length usable for non-null items int effectivePageLength = pageLength - (needNullSelectOption && (currentPage == 0) ? 1 : 0); // zero pageLength implies infinite page size return pageLength == 0 ? size - 1 : Math.min(size - 1, first + effectivePageLength - 1); }
3.68
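A quick worked example of the arithmetic above, with illustrative numbers: pageLength = 10, needNullSelectOption = true, currentPage = 0, size = 25 and first = 0 give effectivePageLength = 10 - 1 = 9, so the last index on the first page is min(25 - 1, 0 + 9 - 1) = 8. On later pages the null item no longer takes a slot, so the effective page length is back to 10.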
pulsar_PrometheusTextFormat_write004
/** * Write out the text version 0.0.4 of the given MetricFamilySamples. */ public static void write004(Writer writer, Enumeration<Collector.MetricFamilySamples> mfs) throws IOException { /* * See http://prometheus.io/docs/instrumenting/exposition_formats/ for the output format specification. */ while (mfs.hasMoreElements()) { Collector.MetricFamilySamples metricFamilySamples = mfs.nextElement(); writer.write("# TYPE "); writer.write(metricFamilySamples.name); writer.write(' '); writer.write(metricFamilySamples.type.name().toLowerCase()); writer.write('\n'); for (Collector.MetricFamilySamples.Sample sample : metricFamilySamples.samples) { writer.write(sample.name); if (sample.labelNames.size() > 0) { writer.write('{'); for (int i = 0; i < sample.labelNames.size(); ++i) { writer.write(sample.labelNames.get(i)); writer.write("=\""); writeEscapedLabelValue(writer, sample.labelValues.get(i)); writer.write("\","); } writer.write('}'); } writer.write(' '); writer.write(Collector.doubleToGoString(sample.value)); if (sample.timestampMs != null) { writer.write(' '); writer.write(sample.timestampMs.toString()); } writer.write('\n'); } } }
3.68
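For reference, a sketch of the output this writer produces for one counter sample; the metric name, label and values are made up, and the trailing comma inside the braces is what the code above emits and the 0.0.4 text format accepts:

# TYPE pulsar_requests counter
pulsar_requests{cluster="standalone",} 42.0 1700000000000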
graphhopper_LandmarkStorage_estimateMaxWeight
/** * This method returns the maximum weight for the graph starting from the landmarks */ private double estimateMaxWeight(List<IntArrayList> graphComponents, EdgeFilter accessFilter) { double maxWeight = 0; int searchedSubnetworks = 0; Random random = new Random(0); // the maximum weight can only be an approximation so there is only a tiny improvement when we would do this for // all landmarks. See #2027 (1st commit) where only 1 landmark was sufficient when multiplied with 1.01 at the end // TODO instead of calculating the landmarks again here we could store them in landmarkIDs and do this for all here int[] tmpLandmarkNodeIds = new int[3]; for (IntArrayList subnetworkIds : graphComponents) { if (subnetworkIds.size() < minimumNodes) continue; searchedSubnetworks++; int maxRetries = Math.max(subnetworkIds.size(), 100); for (int retry = 0; retry < maxRetries; retry++) { int index = random.nextInt(subnetworkIds.size()); int nextStartNode = subnetworkIds.get(index); LandmarkExplorer explorer = findLandmarks(tmpLandmarkNodeIds, nextStartNode, accessFilter, "estimate " + index); if (explorer.getFromCount() < minimumNodes) { LOGGER.error("method findLandmarks for " + createPoint(graph, nextStartNode) + " (" + nextStartNode + ")" + " resulted in too few visited nodes: " + explorer.getFromCount() + " vs expected minimum " + minimumNodes + ", see #2256"); continue; } // starting for (int lmIdx = 0; lmIdx < tmpLandmarkNodeIds.length; lmIdx++) { int lmNodeId = tmpLandmarkNodeIds[lmIdx]; explorer = new LandmarkExplorer(graph, this, weighting, traversalMode, accessFilter, false); explorer.setStartNode(lmNodeId); explorer.runAlgo(); maxWeight = Math.max(maxWeight, explorer.getLastEntry().weight); } break; } } if (maxWeight <= 0 && searchedSubnetworks > 0) throw new IllegalStateException("max weight wasn't set although " + searchedSubnetworks + " subnetworks were searched (total " + graphComponents.size() + "), minimumNodes:" + minimumNodes); // we have to increase maxWeight slightly as it is only an approximation towards the maximum weight, // especially when external landmarks are provided, but also because we do not traverse all landmarks return maxWeight * 1.008; }
3.68
framework_VGridLayout_distributeRowSpanHeights
/** * Iterates rowspanned cells, ensures rows have enough space to accommodate * them */ private void distributeRowSpanHeights() { for (SpanList list : rowSpans) { for (Cell cell : list.cells) { // cells with relative content may return non 0 here if on // subsequent renders int height = cell.hasRelativeHeight() ? 0 : cell.getHeight(); distributeSpanSize(rowHeights, cell.row, cell.rowspan, getVerticalSpacing(), height, rowExpandRatioArray); } } }
3.68
framework_FlyweightRow_attached
/** * Creates a new iterator of attached flyweight cells. A cell is * attached if it has a corresponding {@link FlyweightCell#getElement() * DOM element} attached to the row element. * * @param cells * the collection of cells to iterate */ public static CellIterator attached( final Collection<FlyweightCell> cells) { return new CellIterator(cells, true); }
3.68
hadoop_NMClient_getNMTokenCache
/** * Get the NM token cache of the <code>NMClient</code>. This cache must be * shared with the {@link AMRMClient} that requested the containers managed * by this <code>NMClient</code> * <p> * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * singleton instance will be used. * * @return the NM token cache */ public NMTokenCache getNMTokenCache() { return nmTokenCache; }
3.68
hadoop_AbfsOperationMetrics_getStartTime
/** * * @return start time of metric collection. */ long getStartTime() { return startTime; }
3.68
hadoop_CheckpointCommand_needToReturnImage
/** * Indicates whether the new checkpoint image needs to be transferred * back to the name-node after the checkpoint is done. * * @return true if the checkpoint image should be returned to the name-node. */ public boolean needToReturnImage() { return needToReturnImage; }
3.68
hadoop_NamenodeStatusReport_getNumEnteringMaintenanceDataNodes
/** * Get the number of entering maintenance nodes. * * @return The number of entering maintenance nodes. */ public int getNumEnteringMaintenanceDataNodes() { return this.enteringMaintenanceDataNodes; }
3.68
framework_VTwinColSelect_setReadOnly
/** * Sets this twin column select as read only, meaning selection cannot be * changed. * * @param readOnly * {@code true} for read only, {@code false} for not read only */ public void setReadOnly(boolean readOnly) { if (this.readOnly != readOnly) { this.readOnly = readOnly; updateEnabledState(); } }
3.68
hadoop_YarnClientUtils_buildNodeLabelsFromStr
/** * Creates node labels from string * @param args nodelabels string to be parsed * @return list of node labels */ public static List<NodeLabel> buildNodeLabelsFromStr(String args) { List<NodeLabel> nodeLabels = new ArrayList<>(); for (String p : args.split(",")) { if (!p.trim().isEmpty()) { String labelName = p; // Try to parse exclusive boolean exclusive = NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY; int leftParenthesisIdx = p.indexOf("("); int rightParenthesisIdx = p.indexOf(")"); if ((leftParenthesisIdx == -1 && rightParenthesisIdx != -1) || (leftParenthesisIdx != -1 && rightParenthesisIdx == -1)) { // Parentheses not match throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } if (leftParenthesisIdx > 0 && rightParenthesisIdx > 0) { if (leftParenthesisIdx > rightParenthesisIdx) { // Parentheses not match throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } String property = p.substring(p.indexOf("(") + 1, p.indexOf(")")); if (property.contains("=")) { String key = property.substring(0, property.indexOf("=")).trim(); String value = property .substring(property.indexOf("=") + 1, property.length()) .trim(); // Now we only support one property, which is exclusive, so check if // key = exclusive and value = {true/false} if ("exclusive".equals(key) && ImmutableSet.of("true", "false").contains(value)) { exclusive = Boolean.parseBoolean(value); } else { throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } } else if (!property.trim().isEmpty()) { throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } } // Try to get labelName if there's "(..)" if (labelName.contains("(")) { labelName = labelName.substring(0, labelName.indexOf("(")).trim(); } nodeLabels.add(NodeLabel.newInstance(labelName, exclusive)); } } if (nodeLabels.isEmpty()) { throw new IllegalArgumentException(NO_LABEL_ERR_MSG); } return nodeLabels; }
3.68
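A small, hypothetical illustration of the syntax accepted above: comma-separated label names, each optionally followed by an (exclusive=true|false) property; exclusivity falls back to NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY when omitted.

import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeLabel;

// "gpu" keeps the default exclusivity, "large" is explicitly non-exclusive.
List<NodeLabel> labels = YarnClientUtils.buildNodeLabelsFromStr("gpu,large(exclusive=false)");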
hbase_HFileBlock_getDeserializer
// Cacheable implementation @Override public CacheableDeserializer<Cacheable> getDeserializer() { return HFileBlock.BLOCK_DESERIALIZER; }
3.68
morf_AbstractSqlDialectTest_testDeleteWithLimitWithoutWhereCriterion
/** * Tests that a delete string with a limit and no where criterion is created correctly. */ @Test public void testDeleteWithLimitWithoutWhereCriterion() { DeleteStatement stmt = DeleteStatement .delete(new TableReference(TEST_TABLE)) .limit(1000) .build(); assertEquals("Delete with limit", expectedDeleteWithLimitWithoutWhere(), testDialect.convertStatementToSQL(stmt)); }
3.68
framework_FocusableHTML_addFocusHandler
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.HasFocusHandlers#addFocusHandler(com. * google.gwt.event.dom.client.FocusHandler) */ @Override public HandlerRegistration addFocusHandler(FocusHandler handler) { return addDomHandler(handler, FocusEvent.getType()); }
3.68
hbase_StoreFileTrackerValidationUtils_validatePreRestoreSnapshot
/** * Makes sure restoring a snapshot does not break the current SFT setup; follows * StoreUtils.createStoreConfiguration * @param currentTableDesc Existing Table's TableDescriptor * @param snapshotTableDesc Snapshot's TableDescriptor * @param baseConf Current global configuration * @throws RestoreSnapshotException if restore would break the current SFT setup */ public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, TableDescriptor snapshotTableDesc, Configuration baseConf) throws RestoreSnapshotException { for (ColumnFamilyDescriptor cfDesc : currentTableDesc.getColumnFamilies()) { ColumnFamilyDescriptor snapCFDesc = snapshotTableDesc.getColumnFamily(cfDesc.getName()); // if there is no counterpart in the snapshot it will be just deleted so the config does // not matter if (snapCFDesc != null) { Configuration currentCompositeConf = StoreUtils.createStoreConfiguration(baseConf, currentTableDesc, cfDesc); Configuration snapCompositeConf = StoreUtils.createStoreConfiguration(baseConf, snapshotTableDesc, snapCFDesc); Class<? extends StoreFileTracker> currentSFT = StoreFileTrackerFactory.getTrackerClass(currentCompositeConf); Class<? extends StoreFileTracker> snapSFT = StoreFileTrackerFactory.getTrackerClass(snapCompositeConf); // restoration is not possible if there is an SFT mismatch if (currentSFT != snapSFT) { throw new RestoreSnapshotException( "Restoring Snapshot is not possible because " + " the config for column family " + cfDesc.getNameAsString() + " has incompatible configuration. Current SFT: " + currentSFT + " SFT from snapshot: " + snapSFT); } } } }
3.68
hudi_CollectionUtils_combine
/** * Combines the provided {@link Map}s into one, returning a new instance of {@link HashMap}. * * NOTE: Values associated with overlapping keys are combined using the provided merge function. */ public static <K, V> HashMap<K, V> combine(Map<K, V> one, Map<K, V> another, BiFunction<V, V, V> merge) { HashMap<K, V> combined = new HashMap<>(one.size() + another.size()); combined.putAll(one); another.forEach((k, v) -> combined.merge(k, v, merge)); return combined; }
3.68
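A short usage sketch of the combine overload above; the maps and the Integer::sum merge function are illustrative.

import java.util.HashMap;
import java.util.Map;

Map<String, Integer> first = Map.of("a", 1, "b", 2);
Map<String, Integer> second = Map.of("b", 10, "c", 3);
// The overlapping key "b" is merged with Integer::sum, giving {a=1, b=12, c=3}.
HashMap<String, Integer> merged = CollectionUtils.combine(first, second, Integer::sum);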
framework_AbstractListing_getItemIconGenerator
/** * Gets the currently used item icon generator. The default item icon * provider returns null for all items, resulting in no icons being used. * <p> * Implementations that support item icons make this method public. * * @see IconGenerator * @see #setItemIconGenerator(IconGenerator) * * @return the currently used item icon generator, not null */ protected IconGenerator<T> getItemIconGenerator() { return itemIconGenerator; }
3.68
hbase_RESTServlet_shutdown
/** * Shutdown any services that need to stop */ void shutdown() { if (pauseMonitor != null) pauseMonitor.stop(); if (connectionCache != null) connectionCache.shutdown(); }
3.68
hadoop_Trash_getTrashPolicy
/** * get the configured trash policy. * * @return TrashPolicy. */ TrashPolicy getTrashPolicy() { return trashPolicy; }
3.68
framework_AbstractSplitPanelElement_getSecondComponent
/** * Gets the second component of a split panel and wraps it in given class. * * @param clazz * Components element class * @return Second component wrapped in given class */ public <T extends AbstractElement> T getSecondComponent(Class<T> clazz) { return getContainedComponent(clazz, bySecondContainer); }
3.68
flink_MergeTableLikeUtil_mergeTables
/** * Merges the schema part of {@code CREATE TABLE} statement. It merges * * <ul> * <li>columns * <li>computed columns * <li>watermarks * <li>primary key * </ul> * * <p>Additionally it performs validation of the features of the derived table. This is not done * in the {@link SqlCreateTable#validate()} anymore because the validation should be done on top * of the merged properties. E.g. Some of the columns used in computed columns of the derived * table can be defined in the source table. */ public Schema mergeTables( Map<FeatureOption, MergingStrategy> mergingStrategies, Schema sourceSchema, List<SqlNode> derivedColumns, List<SqlWatermark> derivedWatermarkSpecs, SqlTableConstraint derivedPrimaryKey) { SchemaBuilder schemaBuilder = new SchemaBuilder( mergingStrategies, sourceSchema, (FlinkTypeFactory) validator.getTypeFactory(), dataTypeFactory, validator, escapeExpression); schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns); schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs); schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey); return schemaBuilder.build(); }
3.68
hbase_TableIntegrityErrorHandlerImpl_handleHoleInRegionChain
/** * {@inheritDoc} */ @Override public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException { }
3.68
hadoop_BlockData_getSize
/** * Gets the size of the given block. * @param blockNumber the id of the desired block. * @return the size of the given block. */ public int getSize(int blockNumber) { if (fileSize == 0) { return 0; } if (isLastBlock(blockNumber)) { return (int) (fileSize - (((long) blockSize) * (numBlocks - 1))); } else { return blockSize; } }
3.68
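A worked example of the last-block arithmetic above, with illustrative numbers: for blockSize = 8 MB, fileSize = 20 MB and numBlocks = 3, blocks 0 and 1 return 8 MB each, while the last block returns 20 MB - (8 MB * 2) = 4 MB.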
flink_SplitFetcherManager_createSplitFetcher
/** * Synchronize method to ensure no fetcher is created after the split fetcher manager has * closed. * * @return the created split fetcher. * @throws IllegalStateException if the split fetcher manager has closed. */ protected synchronized SplitFetcher<E, SplitT> createSplitFetcher() { if (closed) { throw new IllegalStateException("The split fetcher manager has closed."); } // Create SplitReader. SplitReader<E, SplitT> splitReader = splitReaderFactory.get(); int fetcherId = fetcherIdGenerator.getAndIncrement(); SplitFetcher<E, SplitT> splitFetcher = new SplitFetcher<>( fetcherId, elementsQueue, splitReader, errorHandler, () -> { fetchers.remove(fetcherId); // We need this to synchronize status of fetchers to concurrent partners // as // ConcurrentHashMap's aggregate status methods including size, isEmpty, // and // containsValue are not designed for program control. elementsQueue.notifyAvailable(); }, this.splitFinishedHook, allowUnalignedSourceSplits); fetchers.put(fetcherId, splitFetcher); return splitFetcher; }
3.68
hadoop_LoggingAuditor_set
/** * Pass to the HTTP referrer. * {@inheritDoc} */ @Override public void set(final String key, final String value) { referrer.set(key, value); }
3.68
morf_H2Dialect_alterTableChangeColumnStatements
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableChangeColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column, org.alfasoftware.morf.metadata.Column) */ @Override public Collection<String> alterTableChangeColumnStatements(Table table, Column oldColumn, Column newColumn) { List<String> result = new ArrayList<>(); if (oldColumn.isPrimaryKey() && !newColumn.isPrimaryKey()) { result.add(dropPrimaryKeyConstraintStatement(table)); } // Rename has to happen BEFORE any operations on the newly renamed column if (!newColumn.getName().equals(oldColumn.getName())) { result.add("ALTER TABLE " + schemaNamePrefix() + table.getName() + " ALTER COLUMN " + oldColumn.getName() + " RENAME TO " + newColumn.getName()); } // Now do column operations on the new if (StringUtils.isNotEmpty(newColumn.getDefaultValue())) { result.add("ALTER TABLE " + schemaNamePrefix() + table.getName() + " ALTER COLUMN " + newColumn.getName() + " SET DEFAULT " + sqlForDefaultClauseLiteral(newColumn)); } if (oldColumn.isNullable() != newColumn.isNullable()) { result.add("ALTER TABLE " + schemaNamePrefix() + table.getName() + " ALTER COLUMN " + newColumn.getName() + " SET " + (newColumn.isNullable() ? "NULL" : "NOT NULL")); } if (oldColumn.getType() != newColumn.getType() || oldColumn.getScale() != newColumn.getScale() || oldColumn.getWidth() != newColumn.getWidth() || !StringUtils.equals(oldColumn.getDefaultValue(), newColumn.getDefaultValue()) || oldColumn.isAutoNumbered() != newColumn.isAutoNumbered()) { result.add("ALTER TABLE " + schemaNamePrefix() + table.getName() + " ALTER COLUMN " + newColumn.getName() + " " + sqlRepresentationOfColumnType(newColumn, false, false, true)); } // rebuild the PK if required List<Column> primaryKeys = primaryKeysForTable(table); if (oldColumn.isPrimaryKey() != newColumn.isPrimaryKey() && !primaryKeys.isEmpty()) { result.add(addPrimaryKeyConstraintStatement(table, namesOfColumns(primaryKeys))); } return result; }
3.68
flink_MessageHeaders_getCustomHeaders
/** * Returns a collection of custom HTTP headers. * * <p>This default implementation returns an empty list. Override this method to provide custom * headers if needed. * * @return a collection of custom {@link HttpHeaders}, empty by default. */ default Collection<HttpHeader> getCustomHeaders() { return Collections.emptyList(); }
3.68
flink_DynamicSinkUtils_getRequireColumnsIndexAndExtraMetaCols
/** Get the index for the required columns and extra meta cols if necessary. */ private static Tuple2<List<Integer>, List<MetadataColumn>> getRequireColumnsIndexAndExtraMetaCols( LogicalTableScan tableScan, List<Column> requiredColumns, ResolvedSchema resolvedSchema) { // index list for the required columns List<Integer> columnIndexList = new ArrayList<>(); // extra meta cols List<MetadataColumn> extraMetadataColumns = new ArrayList<>(); List<String> fieldNames = resolvedSchema.getColumnNames(); final TableSourceTable sourceTable = tableScan.getTable().unwrap(TableSourceTable.class); DynamicTableSource dynamicTableSource = sourceTable.tableSource(); int additionCols = 0; // iterate for each required column for (Column column : requiredColumns) { int index = fieldNames.indexOf(column.getName()); // if we can't find the column, we may need to add extra column if (index <= -1) { // we only consider add metadata column if (column instanceof Column.MetadataColumn) { // need to add meta column columnIndexList.add(fieldNames.size() + additionCols); if (!(dynamicTableSource instanceof SupportsReadingMetadata)) { throw new UnsupportedOperationException( String.format( "The table source don't support reading metadata, but the require columns contains the meta columns: %s.", column)); } // list what metas the source supports to read SupportsReadingMetadata supportsReadingMetadata = (SupportsReadingMetadata) dynamicTableSource; Map<String, DataType> readableMetadata = supportsReadingMetadata.listReadableMetadata(); // check the source can read the meta column String metaCol = ((MetadataColumn) column).getMetadataKey().orElse(column.getName()); if (!readableMetadata.containsKey(metaCol)) { throw new IllegalArgumentException( String.format( "Expect to read the meta column %s, but the table source for table %s doesn't support read the metadata column." + "Please make sure the readable metadata for the source contains %s.", column, UnresolvedIdentifier.of( tableScan.getTable().getQualifiedName()), metaCol)); } // mark it as extra col additionCols += 1; DataType dataType = readableMetadata.get(metaCol); if (!dataType.equals(column.getDataType())) { throw new IllegalArgumentException( String.format( "Un-matched data type: the required column %s has datatype %s, but the data type in readable metadata for the table %s has data type %s. ", column, column.getDataType(), UnresolvedIdentifier.of( tableScan.getTable().getQualifiedName()), dataType)); } extraMetadataColumns.add((MetadataColumn) column); } else { throw new IllegalArgumentException("Unknown required column " + column); } } else { columnIndexList.add(index); } } return Tuple2.of(columnIndexList, extraMetadataColumns); }
3.68
hbase_HFileLink_getHFileLinkPatternRelativePath
/** * Convert a HFileLink path to a table relative path. e.g. the link: * /hbase/test/0123/cf/testtb=4567-abcd becomes: /hbase/testtb/4567/cf/abcd * @param path HFileLink path * @return Relative table path * @throws IOException on unexpected error. */ private static Path getHFileLinkPatternRelativePath(final Path path) { // table=region-hfile Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(path.getName()); if (!m.matches()) { throw new IllegalArgumentException(path.getName() + " is not a valid HFileLink pattern!"); } // Convert the HFileLink name into a real table/region/cf/hfile path. TableName tableName = TableName.valueOf(m.group(1), m.group(2)); String regionName = m.group(3); String hfileName = m.group(4); String familyName = path.getParent().getName(); Path tableDir = CommonFSUtils.getTableDir(new Path("./"), tableName); return new Path(tableDir, new Path(regionName, new Path(familyName, hfileName))); }
3.68
hbase_ZKWatcher_prefix
/** * Adds this instance's identifier as a prefix to the passed <code>str</code> * @param str String to amend. * @return A new string with this instance's identifier as prefix: e.g. if passed 'hello world', * the returned string could be this watcher's identifier followed by ' hello world'. */ public String prefix(final String str) { return this.toString() + " " + str; }
3.68
flink_DataSinkTask_initOutputFormat
/** * Initializes the OutputFormat implementation and configuration. * * @throws RuntimeException Throws if instance of OutputFormat implementation can not be * obtained. */ private void initOutputFormat() { ClassLoader userCodeClassLoader = getUserCodeClassLoader(); // obtain task configuration (including stub parameters) Configuration taskConf = getTaskConfiguration(); this.config = new TaskConfig(taskConf); final Pair<OperatorID, OutputFormat<IT>> operatorIDAndOutputFormat; InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(config, userCodeClassLoader); try { operatorIDAndOutputFormat = formatContainer.getUniqueOutputFormat(); this.format = operatorIDAndOutputFormat.getValue(); // check if the class is a subclass, if the check is required if (!OutputFormat.class.isAssignableFrom(this.format.getClass())) { throw new RuntimeException( "The class '" + this.format.getClass().getName() + "' is not a subclass of '" + OutputFormat.class.getName() + "' as is required."); } } catch (ClassCastException ccex) { throw new RuntimeException( "The stub class is not a proper subclass of " + OutputFormat.class.getName(), ccex); } Thread thread = Thread.currentThread(); ClassLoader original = thread.getContextClassLoader(); // configure the stub. catch exceptions here extra, to report them as originating from the // user code try { thread.setContextClassLoader(userCodeClassLoader); this.format.configure( formatContainer.getParameters(operatorIDAndOutputFormat.getKey())); } catch (Throwable t) { throw new RuntimeException( "The user defined 'configure()' method in the Output Format caused an error: " + t.getMessage(), t); } finally { thread.setContextClassLoader(original); } }
3.68
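The save/swap/restore of the thread context class loader around the user-code call in the snippet above is a general pattern; here is a minimal sketch of just that part, with a hypothetical Runnable standing in for the output format's configure() call.

public class ContextClassLoaderSketch {
    /** Runs userCode with the given class loader installed, always restoring the original. */
    static void runWithClassLoader(ClassLoader userCodeClassLoader, Runnable userCode) {
        Thread thread = Thread.currentThread();
        ClassLoader original = thread.getContextClassLoader();
        try {
            thread.setContextClassLoader(userCodeClassLoader);
            userCode.run();
        } catch (Throwable t) {
            throw new RuntimeException("User code caused an error: " + t.getMessage(), t);
        } finally {
            // restore the original loader even if the user code throws
            thread.setContextClassLoader(original);
        }
    }

    public static void main(String[] args) {
        runWithClassLoader(ContextClassLoaderSketch.class.getClassLoader(),
                () -> System.out.println("configured with "
                        + Thread.currentThread().getContextClassLoader()));
    }
}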
hbase_RESTServletContainer_service
/**
 * This container is used only if authentication and impersonation is enabled. The remote request
 * user is used as a proxy user for impersonation in invoking any REST service.
 */
@Override
public void service(final HttpServletRequest request, final HttpServletResponse response)
  throws ServletException, IOException {
  final HttpServletRequest lowerCaseRequest = toLowerCase(request);
  final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas");
  RESTServlet servlet = RESTServlet.getInstance();
  if (doAsUserFromQuery != null) {
    Configuration conf = servlet.getConfiguration();
    if (!servlet.supportsProxyuser()) {
      throw new ServletException("Support for proxyuser is not configured");
    }
    // Authenticated remote user is attempting to do 'doAs' proxy user.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser());
    // create and attempt to authorize a proxy user (the client is attempting
    // to do proxy user)
    ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
    // validate the proxy user authorization
    try {
      ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
    } catch (AuthorizationException e) {
      throw new ServletException(e.getMessage());
    }
    servlet.setEffectiveUser(doAsUserFromQuery);
  } else {
    String effectiveUser = request.getRemoteUser();
    servlet.setEffectiveUser(effectiveUser);
  }
  super.service(request, response);
}
3.68
dubbo_ServiceAnnotationPostProcessor_findServiceAnnotation
/**
 * Find the {@link Annotation annotation} of @Service
 *
 * @param beanClass the {@link Class class} of Bean
 * @return <code>null</code> if not found
 * @since 2.7.3
 */
private Annotation findServiceAnnotation(Class<?> beanClass) {
    return serviceAnnotationTypes.stream()
            .map(annotationType -> ClassUtils.isPresent(
                                    "org.springframework.core.annotation.AnnotatedElementUtils",
                                    Thread.currentThread().getContextClassLoader())
                            && ReflectUtils.hasMethod(
                                    org.springframework.core.annotation.AnnotatedElementUtils.class,
                                    "findMergedAnnotation")
                    ? org.springframework.core.annotation.AnnotatedElementUtils.findMergedAnnotation(
                            beanClass, annotationType)
                    : org.apache.dubbo.common.utils.AnnotationUtils.findAnnotation(
                            beanClass, annotationType))
            .filter(Objects::nonNull)
            .findFirst()
            .orElse(null);
}
3.68
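A simplified, dependency-free version of the lookup strategy above: try each candidate annotation type in order and return the first one present on the class. The Spring "merged annotation" branch is replaced by plain Class#getAnnotation, so meta-annotations are not resolved; this only illustrates the stream-based first-match search, and the @Service type here is a made-up stand-in.

import java.lang.annotation.Annotation;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

public class FirstAnnotationSketch {
    @Retention(RetentionPolicy.RUNTIME)
    @interface Service {} // hypothetical stand-in for the real service annotation types

    @Service
    static class DemoServiceImpl {}

    static Annotation findServiceAnnotation(Class<?> beanClass,
                                            List<Class<? extends Annotation>> candidates) {
        return candidates.stream()
                .map(type -> beanClass.getAnnotation(type)) // no meta-annotation merging here
                .filter(Objects::nonNull)
                .findFirst()
                .orElse(null);
    }

    public static void main(String[] args) {
        Annotation found = findServiceAnnotation(DemoServiceImpl.class,
                Arrays.asList(Service.class));
        System.out.println(found); // prints the retained @Service instance
    }
}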
framework_TableRowElement_getCell
/**
 * Returns cell from current row by index. Returns the same element as
 * $(TableElement.class).first().getCell(row, col).
 *
 * @see com.vaadin.testbench.elements.TableElement#getCell(int, int)
 * @param col
 *            column index
 * @return cell from current row by index.
 */
public TestBenchElement getCell(int col) {
    List<WebElement> cells = getWrappedElement().findElements(By.tagName("td"));
    if (col >= cells.size()) {
        throw new NoSuchColumnException();
    }
    WebElement cellContent = cells.get(col);
    return wrapElement(cellContent.findElement(By.xpath("./*")), getCommandExecutor());
}
3.68
hudi_SparkInsertOverwritePartitioner_getSmallFiles
/**
 * Returns a list of small files in the given partition path.
 */
@Override
protected List<SmallFile> getSmallFiles(String partitionPath) {
  // for overwrite, we ignore all existing files. So do not consider any file to be smallFiles
  return Collections.emptyList();
}
3.68
flink_TableChange_getConstraint
/** Returns the unique constraint to add. */
public UniqueConstraint getConstraint() {
    return constraint;
}
3.68
hudi_RocksDbDiskMap_iterator
/**
 * Custom iterator to iterate over values written to disk.
 */
@Override
public Iterator<R> iterator() {
  return getRocksDb().iterator(ROCKSDB_COL_FAMILY);
}
3.68
flink_AbstractPagedInputView_getCurrentSegmentLimit
/**
 * Gets the current limit in the memory segment. This value points to the byte one after the
 * last valid byte in the memory segment.
 *
 * @return The current limit in the memory segment.
 * @see #getCurrentPositionInSegment()
 */
public int getCurrentSegmentLimit() {
    return this.limitInSegment;
}
3.68
hudi_HoodieTable_waitForAllFiles
/**
 * Ensures all files passed either appear or disappear.
 *
 * @param context HoodieEngineContext
 * @param groupByPartition Files grouped by partition
 * @param visibility Appear/Disappear
 */
private void waitForAllFiles(HoodieEngineContext context, Map<String, List<Pair<String, String>>> groupByPartition,
                             FileVisibility visibility) {
  // This will either ensure all files to be deleted are present.
  context.setJobStatus(this.getClass().getSimpleName(),
      "Wait for all files to appear/disappear: " + config.getTableName());
  boolean checkPassed = context.map(new ArrayList<>(groupByPartition.entrySet()),
      partitionWithFileList -> waitForCondition(partitionWithFileList.getKey(),
          partitionWithFileList.getValue().stream(), visibility),
      config.getFinalizeWriteParallelism())
      .stream().allMatch(x -> x);
  if (!checkPassed) {
    throw new HoodieIOException("Consistency check failed to ensure all files " + visibility);
  }
}
3.68
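Stripped of the engine context, the check above is "run a predicate per partition and require all of them to pass". Below is a small sketch of that shape with hypothetical types, using a parallel stream in place of HoodieEngineContext#map.

import java.util.List;
import java.util.Map;

public class AllPartitionsCheckSketch {
    // hypothetical stand-in for waitForCondition(partition, files, visibility)
    static boolean waitForCondition(String partition, List<String> files) {
        return files.stream().allMatch(f -> !f.isEmpty()); // pretend "file is visible"
    }

    static void waitForAllFiles(Map<String, List<String>> groupByPartition) {
        boolean checkPassed = groupByPartition.entrySet().parallelStream()
                .map(e -> waitForCondition(e.getKey(), e.getValue()))
                .allMatch(passed -> passed);
        if (!checkPassed) {
            throw new IllegalStateException("Consistency check failed to ensure all files");
        }
    }

    public static void main(String[] args) {
        waitForAllFiles(Map.of("2024/01/01", List.of("a.parquet", "b.parquet")));
        System.out.println("all files visible");
    }
}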
hadoop_ByteArrayDecodingState_checkOutputBuffers
/**
 * Check and ensure the buffers are of the desired length.
 * @param buffers the buffers to check
 */
void checkOutputBuffers(byte[][] buffers) {
  for (byte[] buffer : buffers) {
    if (buffer == null) {
      throw new HadoopIllegalArgumentException(
          "Invalid buffer found, not allowing null");
    }

    if (buffer.length != decodeLength) {
      throw new HadoopIllegalArgumentException(
          "Invalid buffer not of length " + decodeLength);
    }
  }
}
3.68
hadoop_TypedBytesInput_readRawInt
/**
 * Reads the raw bytes following a <code>Type.INT</code> code.
 * @return the obtained bytes sequence
 * @throws IOException
 */
public byte[] readRawInt() throws IOException {
  byte[] bytes = new byte[5];
  bytes[0] = (byte) Type.INT.code;
  in.readFully(bytes, 1, 4);
  return bytes;
}
3.68
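The five raw bytes are one type-code byte followed by the four bytes of a big-endian int (what DataInput#readFully leaves in positions 1..4). A sketch independent of the Hadoop classes, showing how such a buffer could be built and decoded with ByteBuffer; the code constant is illustrative, the real value comes from the Type enum.

import java.nio.ByteBuffer;

public class RawIntSketch {
    static final byte INT_CODE = 3; // illustrative code value, not necessarily Type.INT.code

    static byte[] writeRawInt(int value) {
        // 1 code byte + 4 big-endian payload bytes, mirroring the layout read above
        return ByteBuffer.allocate(5).put(INT_CODE).putInt(value).array();
    }

    static int readRawInt(byte[] raw) {
        ByteBuffer buf = ByteBuffer.wrap(raw);
        byte code = buf.get();
        if (code != INT_CODE) {
            throw new IllegalArgumentException("Not an INT record: code " + code);
        }
        return buf.getInt();
    }

    public static void main(String[] args) {
        byte[] raw = writeRawInt(42);
        System.out.println(raw.length + " bytes, value " + readRawInt(raw)); // 5 bytes, value 42
    }
}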
hbase_AsyncTableBuilder_setRetryPauseForCQTBE
/**
 * Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We
 * use an exponential policy to generate sleep time when retrying.
 * <p/>
 * This value should be greater than the normal pause value which could be set with the above
 * {@link #setRetryPause(long, TimeUnit)} method, as usually
 * {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use
 * the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you
 * specify a smaller value.
 * @see #setRetryPause(long, TimeUnit)
 * @deprecated Since 2.5.0, will be removed in 4.0.0. Please use
 *             {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead.
 */
@Deprecated
default AsyncTableBuilder<C> setRetryPauseForCQTBE(long pause, TimeUnit unit) {
  return setRetryPauseForServerOverloaded(pause, unit);
}
3.68
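A hedged usage sketch of how such a builder might be configured, keeping the overloaded-server pause above the normal pause as the Javadoc recommends. It assumes hbase-client is on the classpath, conn is an already-created AsyncConnection, and the table name is made up; it compiles but needs a running cluster to do anything.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;

public class RetryPauseSketch {
    // conn is assumed to come from ConnectionFactory.createAsyncConnection(conf).get()
    static AsyncTable<AdvancedScanResultConsumer> buildTable(AsyncConnection conn) {
        return conn.getTableBuilder(TableName.valueOf("demo_table"))
                // normal pause between retries
                .setRetryPause(100, TimeUnit.MILLISECONDS)
                // larger pause when the server reports it is overloaded,
                // i.e. the non-deprecated replacement for setRetryPauseForCQTBE
                .setRetryPauseForServerOverloaded(1, TimeUnit.SECONDS)
                .build();
    }
}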
AreaShop_GeneralRegion_getGroups
/**
 * Get the groups that this region is added to.
 * @return A Set with all groups of this region
 */
public Set<RegionGroup> getGroups() {
    Set<RegionGroup> result = new HashSet<>();
    for(RegionGroup group : plugin.getFileManager().getGroups()) {
        if(group.isMember(this)) {
            result.add(group);
        }
    }
    return result;
}
3.68
hadoop_RollingFileSystemSink_getNextIdToTry
/**
 * Return the next ID suffix to use when creating the log file. This method
 * will look at the files in the directory, find the one with the highest
 * ID suffix, add 1 to that suffix, and return it. This approach saves a full
 * linear probe, which matters in the case where there are a large number of
 * log files.
 *
 * @param initial the base file path
 * @param lastId the last ID value that was used
 * @return the next ID to try
 * @throws IOException thrown if there's an issue querying the files in the
 * directory
 */
private int getNextIdToTry(Path initial, int lastId)
    throws IOException {
  RemoteIterator<LocatedFileStatus> files =
      fileSystem.listFiles(currentDirPath, true);
  String base = initial.toString();
  int id = lastId;

  while (files.hasNext()) {
    String file = files.next().getPath().getName();

    if (file.startsWith(base)) {
      int fileId = extractId(file);

      if (fileId > id) {
        id = fileId;
      }
    }
  }

  // Return either 1 more than the highest we found or 1 more than the last
  // ID used (if no ID was found).
  return id + 1;
}
3.68
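The core of the method above is a single scan for the largest numeric suffix among file names that share a base. Here is a filesystem-free sketch of that scan, with a simple "<base>.<id>" naming convention assumed in place of whatever extractId actually parses.

import java.util.List;

public class NextIdSketch {
    // assumes names of the form "<base>.<id>"; the real extractId may differ
    static int extractId(String fileName) {
        int dot = fileName.lastIndexOf('.');
        try {
            return Integer.parseInt(fileName.substring(dot + 1));
        } catch (NumberFormatException e) {
            return -1;
        }
    }

    static int getNextIdToTry(List<String> fileNames, String base, int lastId) {
        int id = lastId;
        for (String file : fileNames) {
            if (file.startsWith(base)) {
                int fileId = extractId(file);
                if (fileId > id) {
                    id = fileId;
                }
            }
        }
        // one more than the highest suffix seen, or lastId + 1 if none matched
        return id + 1;
    }

    public static void main(String[] args) {
        List<String> files = List.of("metrics.log.1", "metrics.log.7", "other.log.9");
        System.out.println(getNextIdToTry(files, "metrics.log", 0)); // 8
    }
}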
flink_TableConfig_getLocalTimeZone
/**
 * Returns the current session time zone id. It is used when converting to/from {@code TIMESTAMP
 * WITH LOCAL TIME ZONE}. See {@link #setLocalTimeZone(ZoneId)} for more details.
 *
 * @see org.apache.flink.table.types.logical.LocalZonedTimestampType
 */
public ZoneId getLocalTimeZone() {
    final String zone = configuration.getString(TableConfigOptions.LOCAL_TIME_ZONE);
    if (TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)) {
        return ZoneId.systemDefault();
    }
    validateTimeZone(zone);
    return ZoneId.of(zone);
}
3.68
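A brief usage sketch: setting the session time zone on a TableConfig and reading it back. This assumes a recent Flink table API where TableConfig.getDefault(), setLocalTimeZone(ZoneId) and getLocalTimeZone() are available; when nothing is configured, the method above falls back to ZoneId.systemDefault().

import java.time.ZoneId;
import org.apache.flink.table.api.TableConfig;

public class LocalTimeZoneSketch {
    public static void main(String[] args) {
        TableConfig config = TableConfig.getDefault();
        // nothing configured yet, so the system zone is returned
        System.out.println(config.getLocalTimeZone());

        // explicitly pin the session zone used for TIMESTAMP WITH LOCAL TIME ZONE conversions
        config.setLocalTimeZone(ZoneId.of("Europe/Berlin"));
        System.out.println(config.getLocalTimeZone()); // Europe/Berlin
    }
}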
framework_VTabsheetPanel_add
/**
 * Adds the specified widget to the deck.
 *
 * @param w
 *            the widget to be added
 */
@Override
public void add(Widget w) {
    Element el = createContainerElement();
    DOM.appendChild(getElement(), el);
    super.add(w, el);
}
3.68