name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
pulsar_AutoConsumeSchema_fetchSchemaIfNeeded | /**
* It may happen that the schema is not loaded yet but we need it, for instance in order to call getSchemaInfo().
* We cannot call this method from getSchemaInfo, because getSchemaInfo is called in many
* places and doing so would introduce lots of deadlocks.
*/
public void fetchSchemaIfNeeded(SchemaVersion schemaVersion) throws SchemaSerializationException {
if (schemaVersion == null) {
schemaVersion = BytesSchemaVersion.of(new byte[0]);
}
if (!schemaMap.containsKey(schemaVersion)) {
if (schemaInfoProvider == null) {
throw new SchemaSerializationException("Can't get accurate schema information for topic " + topicName
+ " using AutoConsumeSchema because SchemaInfoProvider is not set yet");
} else {
SchemaInfo schemaInfo = null;
try {
schemaInfo = schemaInfoProvider.getSchemaByVersion(schemaVersion.bytes()).get();
if (schemaInfo == null) {
// schemaless topic
schemaInfo = BytesSchema.of().getSchemaInfo();
}
} catch (InterruptedException | ExecutionException e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
log.error("Can't get last schema for topic {} using AutoConsumeSchema", topicName);
throw new SchemaSerializationException(e.getCause());
}
// schemaInfo null means that there is no schema attached to the topic.
Schema<?> schema = generateSchema(schemaInfo);
schema.setSchemaInfoProvider(schemaInfoProvider);
setSchema(schemaVersion, schema);
log.info("Configure {} schema {} for topic {} : {}",
componentName, schemaVersion, topicName, schemaInfo.getSchemaDefinition());
}
}
} | 3.68 |
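At its core, fetchSchemaIfNeeded is a lazy per-version schema cache: look the version up, and only on a miss go to the provider and store the result. A minimal, self-contained sketch of that pattern (not Pulsar's actual API; VersionedSchemaCache and loader are hypothetical names standing in for AutoConsumeSchema and SchemaInfoProvider) could look like this:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

/** Minimal sketch of a lazy "fetch once per schema version" cache. */
public class VersionedSchemaCache<V, S> {
    private final Map<V, S> schemaByVersion = new ConcurrentHashMap<>();
    private final Function<V, S> loader; // stands in for SchemaInfoProvider.getSchemaByVersion

    public VersionedSchemaCache(Function<V, S> loader) {
        this.loader = loader;
    }

    /** Loads and caches the schema for the given version only if it is not cached yet. */
    public S fetchIfNeeded(V version) {
        return schemaByVersion.computeIfAbsent(version, loader);
    }
}
```

The real method additionally normalizes a null version to an empty byte array and falls back to a bytes schema for schemaless topics.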
hadoop_OSSListResult_isV1 | /**
* Is this a v1 API result or v2?
* @return true if v1, false if v2
*/
public boolean isV1() {
return v1Result != null;
} | 3.68 |
hadoop_IOStatisticsBinding_fromStorageStatistics | /**
* Create IOStatistics from a storage statistics instance.
*
* This will be updated as the storage statistics change.
* @param storageStatistics source data.
* @return an IO statistics source.
*/
public static IOStatistics fromStorageStatistics(
StorageStatistics storageStatistics) {
DynamicIOStatisticsBuilder builder = dynamicIOStatistics();
Iterator<StorageStatistics.LongStatistic> it = storageStatistics
.getLongStatistics();
while (it.hasNext()) {
StorageStatistics.LongStatistic next = it.next();
builder.withLongFunctionCounter(next.getName(),
k -> storageStatistics.getLong(k));
}
return builder.build();
} | 3.68 |
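The builder registers a function per counter rather than copying values, so the resulting statistics view tracks the underlying StorageStatistics as they change. A rough, self-contained illustration of that idea (hypothetical DynamicCounters class, not Hadoop's DynamicIOStatisticsBuilder):

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.LongSupplier;

/** Sketch of a "dynamic counters" view: values are re-read from the source on every snapshot. */
public class DynamicCounters {
    private final Map<String, LongSupplier> counters = new LinkedHashMap<>();

    public DynamicCounters withCounter(String name, LongSupplier supplier) {
        counters.put(name, supplier);
        return this;
    }

    /** Evaluates the registered suppliers at call time, so the snapshot follows the live statistics. */
    public Map<String, Long> snapshot() {
        Map<String, Long> snap = new LinkedHashMap<>();
        counters.forEach((name, supplier) -> snap.put(name, supplier.getAsLong()));
        return snap;
    }
}
```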
dubbo_TTable_addRow | /**
* Add a row
*/
public TTable addRow(Object... columnDataArray) {
if (null != columnDataArray) {
for (int index = 0; index < columnDefineArray.length; index++) {
final ColumnDefine columnDefine = columnDefineArray[index];
if (index < columnDataArray.length && null != columnDataArray[index]) {
columnDefine.rows.add(replaceTab(columnDataArray[index].toString()));
} else {
columnDefine.rows.add(EMPTY_STRING);
}
}
}
return this;
} | 3.68 |
hbase_ProcedureMember_receivedReachedGlobalBarrier | /**
* Notification that procedure coordinator has reached the global barrier
* @param procName name of the subprocedure that should start running the in-barrier phase
*/
public void receivedReachedGlobalBarrier(String procName) {
Subprocedure subproc = subprocs.get(procName);
if (subproc == null) {
LOG.warn("Unexpected reached globa barrier message for Sub-Procedure '" + procName + "'");
return;
}
if (LOG.isTraceEnabled()) {
LOG.trace("reached global barrier message for Sub-Procedure '" + procName + "'");
}
subproc.receiveReachedGlobalBarrier();
} | 3.68 |
framework_DropTargetExtensionConnector_sendDropEventToServer | /**
* Initiates a server RPC for the drop event.
*
* @param types
* List of data types from {@code DataTransfer.types} object.
* @param data
* Map containing all types and corresponding data from the
* {@code DataTransfer} object.
* @param dropEffect
* The desired drop effect.
* @param dropEvent
* The native drop event.
*/
protected void sendDropEventToServer(List<String> types,
Map<String, String> data, String dropEffect,
NativeEvent dropEvent) {
// Build mouse event details for the drop event
MouseEventDetails mouseEventDetails = MouseEventDetailsBuilder
.buildMouseEventDetails(dropEvent, getDropTargetElement());
// Send data to server with RPC
getRpcProxy(DropTargetRpc.class).drop(types, data, dropEffect,
mouseEventDetails);
} | 3.68 |
pulsar_OwnedBundle_getNamespaceBundle | /**
* Access to the namespace bundle.
*
* @return NamespaceName
*/
public NamespaceBundle getNamespaceBundle() {
return this.bundle;
} | 3.68 |
hibernate-validator_AnnotationMetaDataProvider_retrieveBeanConfiguration | /**
* @param beanClass The bean class for which to retrieve the meta data
*
* @return the constraint-related meta data retrieved from the annotations of the given type.
*/
private <T> BeanConfiguration<T> retrieveBeanConfiguration(Class<T> beanClass) {
Set<ConstrainedElement> constrainedElements = getFieldMetaData( beanClass );
constrainedElements.addAll( getMethodMetaData( beanClass ) );
constrainedElements.addAll( getConstructorMetaData( beanClass ) );
Set<MetaConstraint<?>> classLevelConstraints = getClassLevelConstraints( beanClass );
if ( !classLevelConstraints.isEmpty() ) {
ConstrainedType classLevelMetaData =
new ConstrainedType(
ConfigurationSource.ANNOTATION,
beanClass,
classLevelConstraints
);
constrainedElements.add( classLevelMetaData );
}
return new BeanConfiguration<>(
ConfigurationSource.ANNOTATION,
beanClass,
constrainedElements,
getDefaultGroupSequence( beanClass ),
getDefaultGroupSequenceProvider( beanClass )
);
} | 3.68 |
rocketmq-connect_RocketMqAdminUtil_initDefaultLitePullConsumer | /**
* Initializes a default lite pull consumer.
*
* @param config the RocketMQ connection config
* @param autoCommit whether offsets should be committed automatically
* @return the configured lite pull consumer
* @throws MQClientException if the consumer cannot be created
*/
public static DefaultLitePullConsumer initDefaultLitePullConsumer(RocketMqConfig config,
boolean autoCommit) throws MQClientException {
DefaultLitePullConsumer consumer;
if (StringUtils.isBlank(config.getAccessKey()) && StringUtils.isBlank(config.getSecretKey())) {
consumer = new DefaultLitePullConsumer(
config.getGroupId()
);
} else {
consumer = new DefaultLitePullConsumer(
config.getGroupId(),
getAclRPCHook(config.getAccessKey(), config.getSecretKey())
);
}
consumer.setNamesrvAddr(config.getNamesrvAddr());
String uniqueName = createUniqInstance(config.getNamesrvAddr());
consumer.setInstanceName(uniqueName);
consumer.setUnitName(uniqueName);
consumer.setAutoCommit(autoCommit);
consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
return consumer;
} | 3.68 |
framework_AbstractFieldConnector_isRequiredIndicatorVisible | /**
* Checks whether the required indicator should be shown for the field.
*
* Required indicators are hidden if the field or its data source is
* read-only.
*
* @return true if required indicator should be shown
*/
@Override
public boolean isRequiredIndicatorVisible() {
return getState().required && !isReadOnly();
} | 3.68 |
querydsl_ComparableExpression_gtAny | /**
* Create a {@code this > any right} expression
*
* @param right rhs of the comparison
* @return this > any right
*/
public BooleanExpression gtAny(SubQueryExpression<? extends T> right) {
return gt(ExpressionUtils.any(right));
} | 3.68 |
morf_AbstractConnectionResources_sqlDialect | /**
* @see org.alfasoftware.morf.jdbc.ConnectionResources#sqlDialect()
*/
@Override
public final SqlDialect sqlDialect() {
return findDatabaseType().sqlDialect(getSchemaName());
} | 3.68 |
flink_SingleOutputStreamOperator_setBufferTimeout | /**
* Sets the buffering timeout for data produced by this operation. The timeout defines how long
* data may linger in a partially full buffer before being sent over the network.
*
* <p>Lower timeouts lead to lower tail latencies, but may affect throughput. Timeouts of 1 ms
* still sustain high throughput, even for jobs with high parallelism.
*
* <p>A value of '-1' means that the default buffer timeout should be used. A value of '0'
* indicates that no buffering should happen, and all records/events should be immediately sent
* through the network, without additional buffering.
*
* @param timeoutMillis The maximum time between two output flushes.
* @return The operator with buffer timeout set.
*/
public SingleOutputStreamOperator<T> setBufferTimeout(long timeoutMillis) {
checkArgument(timeoutMillis >= -1, "timeout must be >= -1");
transformation.setBufferTimeout(timeoutMillis);
return this;
} | 3.68 |
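A self-contained sketch of how a sender might interpret the three documented cases (-1 = use the default, 0 = no buffering, positive = flush on a full buffer or elapsed timeout). This is illustrative only and not Flink's network stack; the class and parameter names are hypothetical:

```java
/** Sketch of a flush decision honoring the buffer-timeout semantics described above. */
public final class BufferFlushPolicy {
    public static final long DEFAULT_TIMEOUT = -1L;

    public static boolean shouldFlush(long timeoutMillis, boolean bufferFull,
                                      long millisSinceLastFlush, long defaultTimeoutMillis) {
        // -1 means "use the configured default timeout"
        long effective = (timeoutMillis == DEFAULT_TIMEOUT) ? defaultTimeoutMillis : timeoutMillis;
        if (effective == 0) {
            return true; // no buffering: send every record immediately
        }
        // otherwise flush when the buffer fills up or the timeout has elapsed since the last flush
        return bufferFull || millisSinceLastFlush >= effective;
    }
}
```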
flink_AbstractStreamOperator_setProcessingTimeService | /**
* @deprecated The {@link ProcessingTimeService} instance should be passed by the operator
* constructor and this method will be removed along with {@link SetupableStreamOperator}.
*/
@Deprecated
public void setProcessingTimeService(ProcessingTimeService processingTimeService) {
this.processingTimeService = Preconditions.checkNotNull(processingTimeService);
} | 3.68 |
hadoop_LogParserUtil_setDateFormat | /**
* Set date format for the {@link LogParser}.
*
* @param datePattern the date pattern in the log.
*/
public void setDateFormat(final String datePattern) {
this.format = new SimpleDateFormat(datePattern);
} | 3.68 |
hadoop_RequestFactoryImpl_build | /**
* Build the request factory.
* @return the factory
*/
public RequestFactory build() {
return new RequestFactoryImpl(this);
} | 3.68 |
hudi_StreamerUtil_getIndexConfig | /**
* Returns the index config with given configuration.
*/
public static HoodieIndexConfig getIndexConfig(Configuration conf) {
return HoodieIndexConfig.newBuilder()
.withIndexType(OptionsResolver.getIndexType(conf))
.withBucketNum(String.valueOf(conf.getInteger(FlinkOptions.BUCKET_INDEX_NUM_BUCKETS)))
.withRecordKeyField(conf.getString(FlinkOptions.RECORD_KEY_FIELD))
.withIndexKeyField(OptionsResolver.getIndexKeyField(conf))
.withBucketIndexEngineType(OptionsResolver.getBucketEngineType(conf))
.withEngineType(EngineType.FLINK)
.build();
} | 3.68 |
hadoop_OBSInputStream_closeStream | /**
* Close a stream: decide whether to abort or close, based on the length of
* the stream and the current position. If a close() is attempted and fails,
* the operation escalates to an abort.
*
* <p>This does not set the {@link #closed} flag.
*
* @param reason reason for stream being closed; used in messages
* @param length length of the stream
* @throws IOException on any failure to close stream
*/
private synchronized void closeStream(final String reason,
final long length)
throws IOException {
if (wrappedStream != null) {
try {
wrappedStream.close();
} catch (IOException e) {
// exception escalates to an abort
LOG.debug("When closing {} stream for {}", uri, reason, e);
throw e;
}
LOG.debug(
"Stream {} : {}; streamPos={}, nextReadPos={},"
+ " request range {}-{} length={}",
uri,
reason,
streamCurrentPos,
nextReadPos,
contentRangeStart,
contentRangeFinish,
length);
wrappedStream = null;
}
} | 3.68 |
hadoop_ExtensionHelper_ifBoundDTExtension | /**
* Invoke an operation on an object if it implements the BoundDTExtension
* interface; returns an optional value.
* @param extension the extension to invoke.
* @param fn function to apply
* @param <V> return type of the function.
* @return an optional value which, if not empty, contains the return value
* of the invoked function. If empty: the object was not of a compatible
* type.
*/
public static <V> Optional<V> ifBoundDTExtension(Object extension,
Function<? super BoundDTExtension, ? extends V> fn) {
if (extension instanceof BoundDTExtension) {
return Optional.of((BoundDTExtension) extension).map(fn);
} else {
return Optional.empty();
}
} | 3.68 |
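The helper is an instance of a small, reusable pattern: apply a function only when an object implements a given interface, otherwise return Optional.empty(). A self-contained illustration with a hypothetical Named interface standing in for BoundDTExtension:

```java
import java.util.Optional;
import java.util.function.Function;

/** Self-contained illustration of the "apply a function only if the object implements X" pattern. */
public class OptionalDispatch {
    interface Named { String name(); }                 // stands in for BoundDTExtension

    static <V> Optional<V> ifNamed(Object candidate, Function<? super Named, ? extends V> fn) {
        return (candidate instanceof Named)
                ? Optional.of((Named) candidate).map(fn)
                : Optional.empty();
    }

    public static void main(String[] args) {
        Named token = () -> "kerberos-dt";
        System.out.println(ifNamed(token, Named::name).orElse("not-a-named-object"));   // kerberos-dt
        System.out.println(ifNamed("plain", Named::name).orElse("not-a-named-object")); // not-a-named-object
    }
}
```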
hadoop_RequestFactoryImpl_prepareRequest | /**
* Preflight preparation of AWS request.
* @param <T> web service request builder
* @return prepared builder.
*/
@Retries.OnceRaw
private <T extends SdkRequest.Builder> T prepareRequest(T t) {
if (requestPreparer != null) {
requestPreparer.prepareRequest(t);
}
return t;
} | 3.68 |
hadoop_BlockMissingException_getOffset | /**
* Returns the offset at which this file is corrupted
* @return offset of corrupted file
*/
public long getOffset() {
return offset;
} | 3.68 |
framework_ServiceInitEvent_addRequestHandler | /**
* Adds a new request handler that will be used by this service. The added
* handler will be run before any of the framework's own request handlers,
* but the ordering relative to other custom handlers is not guaranteed.
*
* @param requestHandler
* the request handler to add, not <code>null</code>
*/
public void addRequestHandler(RequestHandler requestHandler) {
Objects.requireNonNull(requestHandler,
"Request handler cannot be null");
addedRequestHandlers.add(requestHandler);
} | 3.68 |
framework_AbsoluteLayout_getPosition | /**
* Gets the position of a component in the layout. Returns null if component
* is not attached to the layout.
* <p>
* Note that you cannot update the position by updating this object. Call
* {@link #setPosition(Component, ComponentPosition)} with the updated
* {@link ComponentPosition} object.
* </p>
*
* @param component
* The component whose position is needed
* @return An instance of ComponentPosition containing the position of the
* component, or null if the component is not enclosed in the
* layout.
*/
public ComponentPosition getPosition(Component component) {
return componentToCoordinates.get(component);
} | 3.68 |
flink_VertexFlameGraphFactory_createOffCpuFlameGraph | /**
* Converts {@link VertexThreadInfoStats} into a FlameGraph representing blocked (Off-CPU)
* threads.
*
* <p>Includes threads in states Thread.State.[TIMED_WAITING, BLOCKED, WAITING].
*
* @param sample Thread details sample containing stack traces.
* @return FlameGraph data structure.
*/
public static VertexFlameGraph createOffCpuFlameGraph(VertexThreadInfoStats sample) {
EnumSet<Thread.State> included =
EnumSet.of(Thread.State.TIMED_WAITING, Thread.State.BLOCKED, Thread.State.WAITING);
return createFlameGraphFromSample(sample, included);
} | 3.68 |
framework_VAbstractPopupCalendar_getOpenCalenderPanelKey | /**
* Get the key code that opens the calendar panel. By default it is the down
* key but you can override this to be whatever you like
*
* @return the key code that opens the calendar panel
*/
protected int getOpenCalenderPanelKey() {
return KeyCodes.KEY_DOWN;
} | 3.68 |
hbase_CellModel_getValue | /** Returns the value */
public byte[] getValue() {
return value;
} | 3.68 |
flink_SavepointWriter_changeOperatorIdentifier | /**
* Changes the identifier of an operator.
*
* <p>This method is comparatively cheap since it only modifies savepoint metadata without
* reading the entire savepoint data.
*
* <p>Use-cases include, but are not limited to:
*
* <ul>
* <li>assigning a UID to an operator that did not have a UID assigned before
* <li>changing the UID of an operator
* <li>swapping the states of 2 operators
* </ul>
*
* <p>Identifier changes are applied after all other operations; in the following example the
* savepoint will only contain UID_2.
*
* <pre>
* SavepointWriter savepoint = ...
* savepoint.withOperator(UID_1, ...)
* savepoint.changeOperatorIdentifier(UID_1, UID_2)
* savepoint.write(...)
* </pre>
*
* <p>You cannot define a chain of changes; in the following example the savepoint will only
* contain UID_2.
*
* <pre>
* SavepointWriter savepoint = ...
* savepoint.withOperator(UID_1, ...)
* savepoint.changeOperatorIdentifier(UID_1, UID_2)
* savepoint.changeOperatorIdentifier(UID_2, UID_3)
* savepoint.write(...)
* </pre>
*
* @param from operator whose identifier should be changed
* @param to desired identifier
* @return The modified savepoint.
*/
public SavepointWriter changeOperatorIdentifier(
OperatorIdentifier from, OperatorIdentifier to) {
this.uidTransformationMap.put(from, to);
return this;
} | 3.68 |
hbase_ClientZKSyncer_start | /**
* Starts the syncer
* @throws KeeperException if error occurs when trying to create base nodes on client ZK
*/
public void start() throws KeeperException {
LOG.debug("Starting " + getClass().getSimpleName());
this.watcher.registerListener(this);
// create base znode on remote ZK
ZKUtil.createWithParents(clientZkWatcher, watcher.getZNodePaths().baseZNode);
// set znodes for client ZK
Set<String> paths = getPathsToWatch();
LOG.debug("ZNodes to watch: {}", paths);
// initialize queues and threads
for (String path : paths) {
startNewSyncThread(path);
}
} | 3.68 |
hadoop_ReadBufferWorker_run | /**
* Waits until a buffer becomes available in ReadAheadQueue.
* Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
* Rinse and repeat. Forever.
*/
public void run() {
try {
UNLEASH_WORKERS.await();
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
ReadBuffer buffer;
while (true) {
try {
buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
return;
}
if (buffer != null) {
try {
// do the actual read, from the file.
int bytesRead = buffer.getStream().readRemote(
buffer.getOffset(),
buffer.getBuffer(),
0,
// If AbfsInputStream was created with bigger buffer size than
// read-ahead buffer size, make sure a valid length is passed
// for remote read
Math.min(buffer.getRequestedLength(), buffer.getBuffer().length),
buffer.getTracingContext());
bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager
} catch (IOException ex) {
buffer.setErrException(ex);
bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
} catch (Exception ex) {
buffer.setErrException(new PathIOException(buffer.getStream().getPath(), ex));
bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
}
}
}
} | 3.68 |
morf_ExceptSetOperator_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "EXCEPT " + selectStatement;
} | 3.68 |
hudi_InternalSchemaUtils_reBuildFilterName | /**
* A helper function to correct the column name of pushed-down filters.
*
* @param name original column name from the pushed-down filters.
* @param fileSchema the real schema of avro/parquet file.
* @param querySchema the query schema which query engine produced.
* @return a corrected name.
*/
public static String reBuildFilterName(String name, InternalSchema fileSchema, InternalSchema querySchema) {
int nameId = querySchema.findIdByName(name);
if (nameId == -1) {
throw new IllegalArgumentException(String.format("cannot find filter col name:%s from querySchema: %s", name, querySchema));
}
if (fileSchema.findField(nameId) == null) {
// added operation found
// the read file does not contain current col, so current colFilter is invalid
return "";
} else {
if (name.equals(fileSchema.findFullName(nameId))) {
// no change happened on current col
return name;
} else {
// find rename operation on current col
// return the name from fileSchema
return fileSchema.findFullName(nameId);
}
}
} | 3.68 |
framework_BasicForwardHandler_forward | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.calendar.ui.CalendarComponentEvents.ForwardHandler#
* forward
* (com.vaadin.addon.calendar.ui.CalendarComponentEvents.ForwardEvent)
*/
@Override
public void forward(ForwardEvent event) {
Date start = event.getComponent().getStartDate();
Date end = event.getComponent().getEndDate();
// calculate amount to move forward
int durationInDays = (int) (((end.getTime()) - start.getTime())
/ DateConstants.DAYINMILLIS);
// for week view durationInDays = 7, for day view durationInDays = 1
durationInDays++;
// set new start and end times
Calendar javaCalendar = Calendar.getInstance();
javaCalendar.setTime(start);
javaCalendar.add(Calendar.DATE, durationInDays);
Date newStart = javaCalendar.getTime();
javaCalendar.setTime(end);
javaCalendar.add(Calendar.DATE, durationInDays);
Date newEnd = javaCalendar.getTime();
if (start.equals(end)) { // day view
int firstDay = event.getComponent().getFirstVisibleDayOfWeek();
int lastDay = event.getComponent().getLastVisibleDayOfWeek();
int dayOfWeek = javaCalendar.get(Calendar.DAY_OF_WEEK);
// we suppose that 7 >= lastDay >= firstDay >= 1
while (!(firstDay <= dayOfWeek && dayOfWeek <= lastDay)) {
javaCalendar.add(Calendar.DATE, 1);
dayOfWeek = javaCalendar.get(Calendar.DAY_OF_WEEK);
}
newStart = javaCalendar.getTime();
newEnd = javaCalendar.getTime();
}
setDates(event, newStart, newEnd);
} | 3.68 |
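The core arithmetic is: take the inclusive length of the visible range and shift both ends forward by that many days. A java.time sketch of just that step (it leaves out the day-view adjustment to the first/last visible day of week shown above):

```java
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;

/** Sketch of the "move the visible range forward by its own length" arithmetic with java.time. */
public class ForwardRange {
    public static LocalDate[] forward(LocalDate start, LocalDate end) {
        // inclusive range: a Mon-Sun week spans 7 days, a single day spans 1 day
        long durationInDays = ChronoUnit.DAYS.between(start, end) + 1;
        return new LocalDate[] {start.plusDays(durationInDays), end.plusDays(durationInDays)};
    }

    public static void main(String[] args) {
        LocalDate[] next = forward(LocalDate.of(2024, 1, 1), LocalDate.of(2024, 1, 7));
        System.out.println(next[0] + " .. " + next[1]); // 2024-01-08 .. 2024-01-14
    }
}
```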
hadoop_FederationStateStoreFacade_createRetryPolicy | /**
* Create a RetryPolicy for {@code FederationStateStoreFacade}. In case of
* failure, it retries for:
* <ul>
* <li>{@code FederationStateStoreRetriableException}</li>
* <li>{@code CacheLoaderException}</li>
* </ul>
*
* @param conf the updated configuration
* @return the RetryPolicy for FederationStateStoreFacade
*/
public static RetryPolicy createRetryPolicy(Configuration conf) {
// Retry settings for StateStore
RetryPolicy basePolicy = RetryPolicies.exponentialBackoffRetry(
conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, Integer.SIZE),
conf.getLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS,
YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS),
TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
exceptionToPolicyMap.put(FederationStateStoreRetriableException.class,
basePolicy);
exceptionToPolicyMap.put(CacheLoaderException.class, basePolicy);
exceptionToPolicyMap.put(PoolInitializationException.class, basePolicy);
RetryPolicy retryPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
return retryPolicy;
} | 3.68 |
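For intuition, exponential backoff means the sleep time roughly doubles with every failed attempt, usually with a cap and some jitter. The following is a hypothetical, self-contained helper illustrating that idea; it is not Hadoop's RetryPolicies implementation, and its jitter rule is an assumption:

```java
import java.util.concurrent.ThreadLocalRandom;

/** Sketch of the exponential-backoff sleep calculation behind such a retry policy. */
public final class ExponentialBackoff {
    /**
     * @param attempt 1-based retry attempt
     * @param baseMs  base sleep time in milliseconds
     * @param maxMs   upper bound for the sleep time
     * @return a jittered sleep time that doubles with every attempt, capped at maxMs
     */
    public static long sleepMillis(int attempt, long baseMs, long maxMs) {
        long exp = Math.min(maxMs, baseMs * (1L << Math.min(attempt - 1, 30)));
        // add up to ~20% random jitter so that clients do not retry in lock-step
        long jitter = ThreadLocalRandom.current().nextLong(exp / 5 + 1);
        return Math.min(maxMs, exp + jitter);
    }
}
```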
hudi_HoodieLogFileReader_readVersion | /**
* Read log format version from log file.
*/
private HoodieLogFormat.LogFormatVersion readVersion() throws IOException {
return new HoodieLogFormatVersion(inputStream.readInt());
} | 3.68 |
hibernate-validator_ExecutableMetaData_getCrossParameterConstraints | /**
* Returns the cross-parameter constraints declared for the represented
* method or constructor.
*
* @return the cross-parameter constraints declared for the represented
* method or constructor. May be empty but will never be
* {@code null}.
*/
public Set<MetaConstraint<?>> getCrossParameterConstraints() {
return crossParameterConstraints;
} | 3.68 |
hudi_BaseHoodieDateTimeParser_getConfigInputDateFormatDelimiter | /**
* Returns the input date format delimiter, comma by default.
*/
public String getConfigInputDateFormatDelimiter() {
return this.configInputDateFormatDelimiter;
} | 3.68 |
hadoop_HHUtil_findFirstValidInput | /**
* Find the valid input from all the inputs.
*
* @param <T> Generics Type T.
* @param inputs input buffers to look for valid input
* @return the first valid input
*/
public static <T> T findFirstValidInput(T[] inputs) {
for (T input : inputs) {
if (input != null) {
return input;
}
}
throw new HadoopIllegalArgumentException(
"Invalid inputs are found, all being null");
} | 3.68 |
morf_FieldLiteral_getDataType | /**
* @return the dataType
*/
public DataType getDataType() {
return dataType;
} | 3.68 |
hadoop_CommitContext_getJobContext | /**
* Job Context.
* @return job context.
*/
public JobContext getJobContext() {
return jobContext;
} | 3.68 |
morf_OracleMetaDataProvider_runSQL | /**
* Run some SQL, and tidy up afterwards.
*
* Note this assumes a predicate on the schema name will be present with a single parameter in position "1".
*
* @param sql The SQL to run.
* @param handler The handler to handle the result-set.
*/
private void runSQL(String sql, ResultSetHandler handler) {
try {
PreparedStatement statement = connection.prepareStatement(sql);
try {
// We'll inevitably need a lot of meta data so may as well get it in big chunks.
statement.setFetchSize(100);
// pass through the schema name
statement.setString(1, schemaName);
ResultSet resultSet = statement.executeQuery();
try {
handler.handle(resultSet);
} finally {
resultSet.close();
}
} finally {
statement.close();
}
} catch (SQLException sqle) {
throw new RuntimeSqlException("Error running SQL: " + sql, sqle);
}
} | 3.68 |
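On Java 7+ the same pattern reads more compactly with try-with-resources, since PreparedStatement and ResultSet are AutoCloseable. A sketch of equivalent behaviour (the ResultSetHandler interface and RuntimeException here are local stand-ins for morf's types):

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

/** Sketch of the same query pattern using try-with-resources instead of nested finally blocks. */
public class MetaDataQuery {
    interface ResultSetHandler { void handle(ResultSet rs) throws SQLException; }

    static void runSQL(Connection connection, String schemaName, String sql, ResultSetHandler handler) {
        try (PreparedStatement statement = connection.prepareStatement(sql)) {
            statement.setFetchSize(100);          // meta data comes in big chunks
            statement.setString(1, schemaName);   // schema-name predicate in position 1
            try (ResultSet resultSet = statement.executeQuery()) {
                handler.handle(resultSet);
            }
        } catch (SQLException sqle) {
            throw new RuntimeException("Error running SQL: " + sql, sqle);
        }
    }
}
```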
flink_ErrorInfo_getException | /** Returns the serialized form of the original exception. */
public SerializedThrowable getException() {
return exception;
} | 3.68 |
flink_HiveParserProjectWindowTrimmer_trimProjectWindow | /**
* Remove the redundant nodes from the project node which contains over window node.
*
* @param selectProject the project node that contains the selected fields, sitting on top of the
* project node with windows
* @param projectWithWindow the project node whose project expressions end with over windows.
* @return the new project node after trimming
*/
public static RelNode trimProjectWindow(
Project selectProject,
Project projectWithWindow,
Map<RelNode, HiveParserRowResolver> relToRowResolver,
Map<RelNode, Map<String, Integer>> relToHiveColNameCalcitePosMap) {
// get the over window nodes
List<RexOver> rexOverList =
projectWithWindow.getProjects().stream()
.filter(node -> node instanceof RexOver)
.map(node -> (RexOver) node)
.collect(Collectors.toList());
// the fields size excluding the over window field in the project node with window
int windowInputColumn = projectWithWindow.getProjects().size() - rexOverList.size();
// find all field referred by over window and select project node
final ImmutableBitSet beReferred =
findReference(selectProject, rexOverList, windowInputColumn);
// If all the input columns are referred,
// it is impossible to trim anyone of them out
if (beReferred.cardinality() == windowInputColumn) {
return selectProject;
}
// Keep only the fields which are referred and the over window field
final List<RexNode> exps = new ArrayList<>();
final RelDataTypeFactory.Builder builder =
projectWithWindow.getCluster().getTypeFactory().builder();
final List<RelDataTypeField> rowTypeWindowInput =
projectWithWindow.getRowType().getFieldList();
// add index for referred field
List<Integer> remainIndexInProjectWindow = new ArrayList<>(beReferred.asList());
// add index for the over window field
remainIndexInProjectWindow.addAll(
IntStream.range(windowInputColumn, projectWithWindow.getProjects().size())
.boxed()
.collect(Collectors.toList()));
for (int index : remainIndexInProjectWindow) {
exps.add(projectWithWindow.getProjects().get(index));
builder.add(rowTypeWindowInput.get(index));
}
// As the un-referred columns are trimmed,
// the indices specified in select project would need to be adjusted
final RexShuttle indexAdjustment =
new RexShuttle() {
@Override
public RexNode visitInputRef(RexInputRef inputRef) {
final int newIndex =
getAdjustedIndex(
inputRef.getIndex(), beReferred, windowInputColumn);
return new RexInputRef(newIndex, inputRef.getType());
}
};
// adjust the top select project node
final List<RexNode> topProjExps = indexAdjustment.visitList(selectProject.getProjects());
// create a project with the project trimmed
LogicalProject trimmedProject =
LogicalProject.create(
projectWithWindow.getInput(),
Collections.emptyList(),
exps,
builder.build());
// put row resolver for newly trimmed project node
HiveParserRowResolver oldRowResolver = relToRowResolver.remove(projectWithWindow);
if (oldRowResolver != null) {
HiveParserRowResolver newProjectRR = new HiveParserRowResolver();
List<ColumnInfo> oldColumnsInfo = oldRowResolver.getColumnInfos();
for (int index : remainIndexInProjectWindow) {
newProjectRR.put(
oldColumnsInfo.get(index).getTabAlias(),
oldColumnsInfo.get(index).getAlias(),
oldColumnsInfo.get(index));
}
relToRowResolver.put(trimmedProject, newProjectRR);
relToHiveColNameCalcitePosMap.remove(projectWithWindow);
relToHiveColNameCalcitePosMap.put(
trimmedProject, buildHiveToCalciteColumnMap(newProjectRR));
}
// create new project with adjusted field ref
RelNode newProject =
LogicalProject.create(
trimmedProject,
Collections.emptyList(),
topProjExps,
selectProject.getRowType());
// put row resolver for newly project node
relToRowResolver.put(newProject, relToRowResolver.remove(selectProject));
relToHiveColNameCalcitePosMap.put(
newProject, relToHiveColNameCalcitePosMap.remove(selectProject));
return newProject;
} | 3.68 |
hbase_MultiRowRangeFilter_isInitialized | /**
* Returns true if this class has been initialized by calling {@link #initialize(boolean)}.
*/
public boolean isInitialized() {
return initialized;
} | 3.68 |
morf_AbstractSelectStatement_orderBy | /**
* Specifies the fields by which to order the result set. For use in builder code.
* See {@link #orderBy(AliasedField...)} for the DSL version.
*
* @param orderFields the fields to order by
* @return a new select statement with the change applied.
*/
public T orderBy(Iterable<AliasedField> orderFields) {
return copyOnWriteOrMutate(
b -> b.orderBy(orderFields),
() -> {
if (orderFields == null) {
throw new IllegalArgumentException("Fields were null in order by clause");
}
// Add the list
Iterables.addAll(orderBys, orderFields);
// Default fields to ascending if no direction has been specified
SqlInternalUtils.defaultOrderByToAscending(orderBys);
}
);
} | 3.68 |
framework_AbstractClientConnector_getResource | /**
* Gets a resource defined using {@link #setResource(String, Resource)} with
* the corresponding key.
*
* @param key
* the string identifier of the resource
* @return a resource, or <code>null</code> if there's no resource
* associated with the given key
*
* @see #setResource(String, Resource)
*/
protected Resource getResource(String key) {
return ResourceReference
.getResource(getState(false).resources.get(key));
} | 3.68 |
flink_StreamContextEnvironment_collectNotAllowedConfigurations | /**
* Collects programmatic configuration changes.
*
* <p>Configuration is spread across instances of {@link Configuration} and POJOs (e.g. {@link
* ExecutionConfig}), so we need to have logic for comparing both. For supporting wildcards, the
* first can be accomplished by simply removing keys, the latter by setting equal fields before
* comparison.
*/
private Collection<String> collectNotAllowedConfigurations() {
if (programConfigEnabled) {
return Collections.emptyList();
}
final List<String> errors = new ArrayList<>();
final Configuration clusterConfigMap = new Configuration(clusterConfiguration);
// Removal must happen on Configuration objects (not instances of Map)
// to also ignore map-typed config options with prefix key notation
removeProgramConfigWildcards(clusterConfigMap);
checkMainConfiguration(clusterConfigMap, errors);
checkCheckpointConfig(clusterConfigMap, errors);
checkExecutionConfig(clusterConfigMap, errors);
return errors;
} | 3.68 |
framework_FieldGroup_setBuffered | /**
* Sets the buffered mode for the bound fields.
* <p>
* When buffered mode is on the item will not be updated until
* {@link #commit()} is called. If buffered mode is off the item will be
* updated once the fields are updated.
* </p>
* <p>
* The default is to use buffered mode.
* </p>
*
* @see Field#setBuffered(boolean)
* @param buffered
* true to turn on buffered mode, false otherwise
*/
public void setBuffered(boolean buffered) {
if (buffered == this.buffered) {
return;
}
this.buffered = buffered;
for (Field<?> field : getFields()) {
field.setBuffered(buffered);
}
} | 3.68 |
flink_JsonRowSchemaConverter_convert | /**
* Converts a JSON schema into Flink's type information. Throws an exception if the schema
* cannot be converted because of loss of precision or a too flexible schema.
*
* <p>The converter can resolve simple schema references to solve those cases where entities are
* defined at the beginning and then used throughout a document.
*/
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convert(String jsonSchema) {
Preconditions.checkNotNull(jsonSchema, "JSON schema");
final ObjectMapper mapper = JacksonMapperFactory.createObjectMapper();
mapper.getFactory()
.enable(JsonParser.Feature.ALLOW_COMMENTS)
.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES)
.enable(JsonParser.Feature.ALLOW_SINGLE_QUOTES);
final JsonNode node;
try {
node = mapper.readTree(jsonSchema);
} catch (IOException e) {
throw new IllegalArgumentException("Invalid JSON schema.", e);
}
return (TypeInformation<T>) convertType("<root>", node, node);
} | 3.68 |
hudi_StringUtils_emptyToNull | /**
* Returns the given string if it is nonempty; {@code null} otherwise.
*
* @param string the string to test and possibly return
* @return {@code string} itself if it is nonempty; {@code null} if it is empty or null
*/
public static @Nullable String emptyToNull(@Nullable String string) {
return stringIsNullOrEmpty(string) ? null : string;
} | 3.68 |
flink_FlinkTypeSystem_adjustType | /**
* Java numeric types always have invalid precision/scale; use the default decimal
* precision/scale instead.
*/
private RelDataType adjustType(RelDataTypeFactory typeFactory, RelDataType relDataType) {
return RelDataTypeFactoryImpl.isJavaType(relDataType)
? typeFactory.decimalOf(relDataType)
: relDataType;
} | 3.68 |
flink_BinaryStringDataUtil_splitByWholeSeparatorPreserveAllTokens | /**
* Splits the provided text into an array, separator string specified.
*
* <p>The separator is not included in the returned String array. Adjacent separators are
* treated as separators for empty tokens.
*
* <p>A {@code null} separator splits on whitespace.
*
* <pre>
* "".splitByWholeSeparatorPreserveAllTokens(*) = []
* "ab de fg".splitByWholeSeparatorPreserveAllTokens(null) = ["ab", "de", "fg"]
* "ab de fg".splitByWholeSeparatorPreserveAllTokens(null) = ["ab", "", "", "de", "fg"]
* "ab:cd:ef".splitByWholeSeparatorPreserveAllTokens(":") = ["ab", "cd", "ef"]
* "ab-!-cd-!-ef".splitByWholeSeparatorPreserveAllTokens("-!-") = ["ab", "cd", "ef"]
* </pre>
*
* <p>Note: returned binary strings reuse memory segments from the input str.
*
* @param separator String containing the String to be used as a delimiter, {@code null} splits
* on whitespace
* @return an array of parsed Strings, {@code null} if null String was input
*/
public static BinaryStringData[] splitByWholeSeparatorPreserveAllTokens(
BinaryStringData str, BinaryStringData separator) {
str.ensureMaterialized();
final int sizeInBytes = str.getSizeInBytes();
MemorySegment[] segments = str.getSegments();
int offset = str.getOffset();
if (sizeInBytes == 0) {
return EMPTY_STRING_ARRAY;
}
if (separator == null || EMPTY_UTF8.equals(separator)) {
// Split on whitespace.
return splitByWholeSeparatorPreserveAllTokens(str, fromString(" "));
}
separator.ensureMaterialized();
int sepSize = separator.getSizeInBytes();
MemorySegment[] sepSegs = separator.getSegments();
int sepOffset = separator.getOffset();
final ArrayList<BinaryStringData> substrings = new ArrayList<>();
int beg = 0;
int end = 0;
while (end < sizeInBytes) {
end =
SegmentsUtil.find(
segments,
offset + beg,
sizeInBytes - beg,
sepSegs,
sepOffset,
sepSize)
- offset;
if (end > -1) {
if (end > beg) {
// The following is OK, because String.substring( beg, end ) excludes
// the character at the position 'end'.
substrings.add(fromAddress(segments, offset + beg, end - beg));
// Set the starting point for the next search.
// The following is equivalent to beg = end + (separatorLength - 1) + 1,
// which is the right calculation:
beg = end + sepSize;
} else {
// We found a consecutive occurrence of the separator.
substrings.add(EMPTY_UTF8);
beg = end + sepSize;
}
} else {
// String.substring( beg ) goes from 'beg' to the end of the String.
substrings.add(fromAddress(segments, offset + beg, sizeInBytes - beg));
end = sizeInBytes;
}
}
return substrings.toArray(new BinaryStringData[0]);
} | 3.68 |
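Stripped of the MemorySegment machinery, the algorithm is a classic whole-separator split that keeps empty tokens between adjacent separators. A simplified plain-String sketch (it deliberately omits the null-separator whitespace handling and the empty-input special case of the method above):

```java
import java.util.ArrayList;
import java.util.List;

/** Plain-String sketch of splitting by a whole separator while preserving empty tokens. */
public class WholeSeparatorSplit {
    public static List<String> split(String str, String sep) {
        List<String> tokens = new ArrayList<>();
        int beg = 0;
        while (beg <= str.length()) {
            int end = str.indexOf(sep, beg);
            if (end < 0) {                       // no more separators: take the rest
                tokens.add(str.substring(beg));
                break;
            }
            tokens.add(str.substring(beg, end)); // may be "" for adjacent separators
            beg = end + sep.length();
        }
        return tokens;
    }

    public static void main(String[] args) {
        System.out.println(split("ab-!-cd-!-ef", "-!-"));  // [ab, cd, ef]
        System.out.println(split("ab-!--!-ef", "-!-"));    // [ab, , ef]
    }
}
```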
hbase_RegionStateStore_mergeRegions | // ============================================================================================
// Update Region Merging State helpers
// ============================================================================================
public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serverName,
TableDescriptor htd) throws IOException {
boolean globalScope = htd.hasGlobalReplicationScope();
long time = HConstants.LATEST_TIMESTAMP;
List<Mutation> mutations = new ArrayList<>();
List<RegionInfo> replicationParents = new ArrayList<>();
for (RegionInfo ri : parents) {
long seqNum = globalScope ? getOpenSeqNumForParentRegion(ri) : -1;
// Deletes for merging regions
mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(ri, time));
if (seqNum > 0) {
mutations
.add(ReplicationBarrierFamilyFormat.makePutForReplicationBarrier(ri, seqNum, time));
replicationParents.add(ri);
}
}
// Put for parent
Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(child, time);
putOfMerged = addMergeRegions(putOfMerged, Arrays.asList(parents));
// Set initial state to CLOSED.
// NOTE: If initial state is not set to CLOSED then merged region gets added with the
// default OFFLINE state. If Master gets restarted after this step, start up sequence of
// master tries to assign this offline region. This is followed by re-assignments of the
// merged region from resumed {@link MergeTableRegionsProcedure}
MetaTableAccessor.addRegionStateToPut(putOfMerged, RegionInfo.DEFAULT_REPLICA_ID,
RegionState.State.CLOSED);
mutations.add(putOfMerged);
// The merged is a new region, openSeqNum = 1 is fine. ServerName may be null
// if crash after merge happened but before we got to here.. means in-memory
// locations of offlined merged, now-closed, regions is lost. Should be ok. We
// assign the merged region later.
if (serverName != null) {
MetaTableAccessor.addLocation(putOfMerged, serverName, 1, child.getReplicaId());
}
// Add empty locations for region replicas of the merged region so that number of replicas
// can be cached whenever the primary region is looked up from meta
int regionReplication = getRegionReplication(htd);
for (int i = 1; i < regionReplication; i++) {
MetaTableAccessor.addEmptyLocation(putOfMerged, i);
}
// add parent reference for serial replication
if (!replicationParents.isEmpty()) {
ReplicationBarrierFamilyFormat.addReplicationParent(putOfMerged, replicationParents);
}
multiMutate(child, mutations);
} | 3.68 |
flink_Path_read | /**
* Read uri from {@link DataInputView}.
*
* @param in the input view to read the uri.
* @throws IOException if an error happened.
* @deprecated the method is deprecated since Flink 1.19 because Path will no longer implement
* {@link IOReadableWritable} in future versions. Please use {@code
* deserializeFromDataInputView} instead.
* @see <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-347%3A+Remove+IOReadableWritable+serialization+in+Path">
* FLIP-347: Remove IOReadableWritable serialization in Path</a>
*/
@Deprecated
@Override
public void read(DataInputView in) throws IOException {
Path path = deserializeFromDataInputView(in);
if (path != null) {
uri = path.toUri();
}
} | 3.68 |
hudi_HFileBootstrapIndex_createReader | /**
* Helper method to create HFile Reader.
*
* @param hFilePath File Path
* @param conf Configuration
* @param fileSystem File System
*/
private static HFile.Reader createReader(String hFilePath, Configuration conf, FileSystem fileSystem) {
LOG.info("Opening HFile for reading :" + hFilePath);
return HoodieHFileUtils.createHFileReader(fileSystem, new HFilePathForReader(hFilePath), new CacheConfig(conf), conf);
} | 3.68 |
hbase_CatalogJanitor_cleanMergeRegion | /**
* If the merged region no longer holds references to the regions it was merged from, archive
* those parent regions on HDFS and delete their references in hbase:meta
* @return true if we delete references in merged region on hbase:meta and archive the files on
* the file system
*/
static boolean cleanMergeRegion(MasterServices services, final RegionInfo mergedRegion,
List<RegionInfo> parents) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Cleaning merged region {}", mergedRegion);
}
Pair<Boolean, Boolean> result =
checkRegionReferences(services, mergedRegion.getTable(), mergedRegion);
if (hasNoReferences(result)) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Deleting parents ({}) from fs; merged child {} no longer holds references", parents
.stream().map(r -> RegionInfo.getShortNameToLog(r)).collect(Collectors.joining(", ")),
mergedRegion);
}
ProcedureExecutor<MasterProcedureEnv> pe = services.getMasterProcedureExecutor();
GCMultipleMergedRegionsProcedure mergeRegionProcedure =
new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), mergedRegion, parents);
pe.submitProcedure(mergeRegionProcedure);
if (LOG.isDebugEnabled()) {
LOG.debug("Submitted procedure {} for merged region {}", mergeRegionProcedure,
mergedRegion);
}
return true;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Deferring cleanup up of {} parents of merged region {}, because references "
+ "still exist in merged region or we encountered an exception in checking",
parents.size(), mergedRegion.getEncodedName());
}
}
return false;
} | 3.68 |
framework_Form_registerField | /**
* Register the field with the form. All registered fields are validated
* when the form is validated and also committed when the form is committed.
*
* <p>
* The property id must not be already used in the form.
* </p>
*
*
* @param propertyId
* the Property id of the field.
* @param field
* the Field that should be registered
*/
private void registerField(Object propertyId, Field<?> field) {
if (propertyId == null || field == null) {
return;
}
fields.put(propertyId, field);
field.addListener(fieldValueChangeListener);
if (!propertyIds.contains(propertyId)) {
// adding a field directly
propertyIds.addLast(propertyId);
}
// Update the buffered mode and immediate to match the
// form.
// Should this also include invalidCommitted (#3993)?
field.setBuffered(buffered);
if (isImmediate() && field instanceof AbstractLegacyComponent) {
((AbstractLegacyComponent) field).setImmediate(true);
}
} | 3.68 |
hudi_DateTimeUtils_formatUnixTimestamp | /**
* Convert UNIX_TIMESTAMP to string in given format.
*
* @param unixTimestamp UNIX_TIMESTAMP
* @param timeFormat string time format
*/
public static String formatUnixTimestamp(long unixTimestamp, String timeFormat) {
ValidationUtils.checkArgument(!StringUtils.isNullOrEmpty(timeFormat));
DateTimeFormatter dtf = DateTimeFormatter.ofPattern(timeFormat);
return LocalDateTime
.ofInstant(Instant.ofEpochSecond(unixTimestamp), ZoneId.systemDefault())
.format(dtf);
} | 3.68 |
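A small usage sketch; unlike the helper above, which uses ZoneId.systemDefault(), this pins UTC so the printed value is deterministic:

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

/** Usage sketch: formatting a UNIX timestamp (in seconds) with an explicit zone. */
public class FormatUnixTimestampExample {
    public static void main(String[] args) {
        long unixTimestamp = 1_700_000_000L; // 2023-11-14T22:13:20Z
        DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
        String formatted = LocalDateTime
                .ofInstant(Instant.ofEpochSecond(unixTimestamp), ZoneId.of("UTC"))
                .format(dtf);
        System.out.println(formatted); // 2023-11-14 22:13:20
    }
}
```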
hmily_SwaggerConfig_apiInfo | /**
* Builds the Swagger API info.
*
* @return the api info
*/
private ApiInfo apiInfo() {
return new ApiInfoBuilder()
.title("Swagger API")
.description("dubbo分布式事务解决方案之Hmily测试体验")
.license("Apache 2.0")
.licenseUrl("http://www.apache.org/licenses/LICENSE-2.0.html")
.termsOfServiceUrl("")
.version(VERSION)
.contact(new Contact("xiaoyu", "", "[email protected]"))
.build();
} | 3.68 |
hudi_SparkValidatorUtils_runValidators | /**
* Checks configured pre-commit validators and runs them. Note that this only works for COW tables.
*
* Throws an error if there are validation failures.
*/
public static void runValidators(HoodieWriteConfig config,
HoodieWriteMetadata<HoodieData<WriteStatus>> writeMetadata,
HoodieEngineContext context,
HoodieTable table,
String instantTime) {
if (StringUtils.isNullOrEmpty(config.getPreCommitValidators())) {
LOG.info("no validators configured.");
} else {
if (!writeMetadata.getWriteStats().isPresent()) {
writeMetadata.setWriteStats(writeMetadata.getWriteStatuses().map(WriteStatus::getStat).collectAsList());
}
Set<String> partitionsModified = writeMetadata.getWriteStats().get().stream().map(HoodieWriteStat::getPartitionPath).collect(Collectors.toSet());
SQLContext sqlContext = new SQLContext(HoodieSparkEngineContext.getSparkContext(context));
// Refresh timeline to ensure the validator sees any other operations done on the timeline (async operations such as other clustering/compaction/rollback)
table.getMetaClient().reloadActiveTimeline();
Dataset<Row> afterState = getRecordsFromPendingCommits(sqlContext, partitionsModified, writeMetadata, table, instantTime);
Dataset<Row> beforeState = getRecordsFromCommittedFiles(sqlContext, partitionsModified, table, afterState.schema());
Stream<SparkPreCommitValidator> validators = Arrays.stream(config.getPreCommitValidators().split(","))
.map(validatorClass -> ((SparkPreCommitValidator) ReflectionUtils.loadClass(validatorClass,
new Class<?>[] {HoodieSparkTable.class, HoodieEngineContext.class, HoodieWriteConfig.class},
table, context, config)));
boolean allSuccess = validators.map(v -> runValidatorAsync(v, writeMetadata, beforeState, afterState, instantTime)).map(CompletableFuture::join)
.reduce(true, Boolean::logicalAnd);
if (allSuccess) {
LOG.info("All validations succeeded");
} else {
LOG.error("At least one pre-commit validation failed");
throw new HoodieValidationException("At least one pre-commit validation failed");
}
}
} | 3.68 |
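The control flow is a fan-out/fan-in: start every validator asynchronously, join all of them, and AND the results so every failure is surfaced before the commit is accepted. A self-contained sketch of that pattern with plain CompletableFutures (hypothetical ValidatorRunner, not Hudi's SparkPreCommitValidator API):

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import java.util.stream.Collectors;

/** Sketch of running all validators concurrently and combining their results with logical AND. */
public class ValidatorRunner {
    public static boolean runAll(List<Supplier<Boolean>> validators) {
        // start every validator first so they actually run concurrently
        List<CompletableFuture<Boolean>> futures = validators.stream()
                .map(CompletableFuture::supplyAsync)
                .collect(Collectors.toList());
        // then wait for all of them and require every one to succeed
        return futures.stream()
                .map(CompletableFuture::join)
                .reduce(true, Boolean::logicalAnd);
    }

    public static void main(String[] args) {
        boolean allSuccess = runAll(List.of(() -> true, () -> false));
        if (!allSuccess) {
            throw new IllegalStateException("At least one pre-commit validation failed");
        }
    }
}
```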
streampipes_NetioRestAdapter_pullData | /**
* pullData is called iteratively according to the polling interval defined in getPollInterval.
*/
@Override
public void pullData() {
try {
NetioAllPowerOutputs allPowerOutputs = requestData();
for (NetioPowerOutput output : allPowerOutputs.getPowerOutputs()) {
Map<String, Object> event = NetioUtils.getEvent(allPowerOutputs.getGobalMeasure(), output);
collector.collect(event);
}
} catch (IOException e) {
e.printStackTrace();
}
} | 3.68 |
flink_StreamConfig_setOperatorNonChainedOutputs | /** Sets the operator level non-chained outputs. */
public void setOperatorNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
toBeSerializedConfigObjects.put(OP_NONCHAINED_OUTPUTS, nonChainedOutputs);
} | 3.68 |
hbase_BufferedMutatorParams_opertationTimeout | /**
* @deprecated Since 2.3.0, will be removed in 4.0.0. Use {@link #operationTimeout(int)}
*/
@Deprecated
public BufferedMutatorParams opertationTimeout(final int operationTimeout) {
this.operationTimeout = operationTimeout;
return this;
} | 3.68 |
hmily_HashedWheelTimer_clearTimeouts | /**
* Clear this bucket and return all not expired / cancelled {@link Timeout}s.
*/
public void clearTimeouts(final Set<Timeout> set) {
for (;;) {
HashedWheelTimeout timeout = pollTimeout();
if (timeout == null) {
return;
}
if (timeout.isExpired() || timeout.isCancelled()) {
continue;
}
set.add(timeout);
}
} | 3.68 |
hadoop_ManifestCommitterSupport_maybeAddIOStatistics | /**
* If the object is an IOStatisticsSource, get and add
* its IOStatistics.
* @param o source object.
*/
public static void maybeAddIOStatistics(IOStatisticsAggregator ios,
Object o) {
if (o instanceof IOStatisticsSource) {
ios.aggregate(((IOStatisticsSource) o).getIOStatistics());
}
} | 3.68 |
rocketmq-connect_RebalanceService_onConfigUpdate | /**
* When config change.
*/
@Override
public void onConfigUpdate() {
RebalanceService.this.wakeup();
} | 3.68 |
framework_Form_getComponentIterator | /**
* @deprecated As of 7.0, use {@link #iterator()} instead.
*/
@Deprecated
public Iterator<Component> getComponentIterator() {
return iterator();
} | 3.68 |
framework_ContainerEventProvider_getStyleNameProperty | /**
* Get the property which provides the style name for the event.
*/
public Object getStyleNameProperty() {
return styleNameProperty;
} | 3.68 |
flink_Hardware_getSizeOfPhysicalMemoryForLinux | /**
* Returns the size of the physical memory in bytes on a Linux-based operating system.
*
* @return the size of the physical memory in bytes or {@code -1}, if the size could not be
* determined
*/
private static long getSizeOfPhysicalMemoryForLinux() {
try (BufferedReader lineReader =
new BufferedReader(new FileReader(LINUX_MEMORY_INFO_PATH))) {
String line;
while ((line = lineReader.readLine()) != null) {
Matcher matcher = LINUX_MEMORY_REGEX.matcher(line);
if (matcher.matches()) {
String totalMemory = matcher.group(1);
return Long.parseLong(totalMemory) * 1024L; // Convert from kilobyte to byte
}
}
// expected line did not come
LOG.error(
"Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). "
+ "Unexpected format.");
return -1;
} catch (NumberFormatException e) {
LOG.error(
"Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). "
+ "Unexpected format.");
return -1;
} catch (Throwable t) {
LOG.error(
"Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo') ",
t);
return -1;
}
} | 3.68 |
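A trimmed-down, self-contained sketch of the same /proc/meminfo parsing; the MemTotal regex here is an assumption about the line format rather than Flink's actual LINUX_MEMORY_REGEX:

```java
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Sketch of reading the physical memory size from /proc/meminfo on Linux. */
public class MemInfo {
    private static final Pattern MEM_TOTAL = Pattern.compile("^MemTotal:\\s*(\\d+)\\s+kB$");

    /** @return physical memory in bytes, or -1 if the value could not be determined */
    public static long physicalMemoryBytes() {
        try (BufferedReader reader = new BufferedReader(new FileReader("/proc/meminfo"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                Matcher m = MEM_TOTAL.matcher(line);
                if (m.matches()) {
                    return Long.parseLong(m.group(1)) * 1024L; // kB -> bytes
                }
            }
        } catch (IOException | NumberFormatException e) {
            return -1;
        }
        return -1; // expected line did not come
    }
}
```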
hudi_AvroSchemaCompatibility_calculateCompatibility | /**
* Calculates the compatibility of a reader/writer schema pair.
*
* <p>
* Relies on external memoization performed by
* {@link #getCompatibility(Schema, Schema)}.
* </p>
*
* @param reader Reader schema to test.
* @param writer Writer schema to test.
* @param locations Stack with which to track the location within the schema.
* @return the compatibility of the reader/writer schema pair.
*/
private SchemaCompatibilityResult calculateCompatibility(final Schema reader, final Schema writer,
final Deque<LocationInfo> locations) {
SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
if (reader.getType() == writer.getType()) {
switch (reader.getType()) {
case NULL:
case BOOLEAN:
case INT:
case LONG:
case FLOAT:
case DOUBLE:
case BYTES:
case STRING: {
return result;
}
case ARRAY: {
return result.mergedWith(getCompatibility(reader.getElementType(), writer.getElementType(), locations));
}
case MAP: {
return result.mergedWith(getCompatibility(reader.getValueType(), writer.getValueType(), locations));
}
case FIXED: {
result = result.mergedWith(checkSchemaNames(reader, writer, locations));
return result.mergedWith(checkFixedSize(reader, writer, locations));
}
case ENUM: {
result = result.mergedWith(checkSchemaNames(reader, writer, locations));
return result.mergedWith(checkReaderEnumContainsAllWriterEnumSymbols(reader, writer, locations));
}
case RECORD: {
result = result.mergedWith(checkSchemaNames(reader, writer, locations));
return result.mergedWith(checkReaderWriterRecordFields(reader, writer, locations));
}
case UNION: {
// Check that each individual branch of the writer union can be decoded:
for (final Schema writerBranch : writer.getTypes()) {
SchemaCompatibilityResult compatibility = getCompatibility(reader, writerBranch, locations);
if (compatibility.getCompatibility() == SchemaCompatibilityType.INCOMPATIBLE) {
String message = String.format("reader union lacking writer type: %s", writerBranch.getType());
result = result.mergedWith(SchemaCompatibilityResult.incompatible(
SchemaIncompatibilityType.MISSING_UNION_BRANCH, reader, writer, message, asList(locations)));
}
}
// Each schema in the writer union can be decoded with the reader:
return result;
}
default: {
throw new AvroRuntimeException("Unknown schema type: " + reader.getType());
}
}
} else {
// Reader and writer have different schema types:
// Reader compatible with all branches of a writer union is compatible
if (writer.getType() == Schema.Type.UNION) {
for (Schema s : writer.getTypes()) {
result = result.mergedWith(getCompatibility(reader, s, locations));
}
return result;
}
switch (reader.getType()) {
case NULL:
return result.mergedWith(typeMismatch(reader, writer, locations));
case BOOLEAN:
return result.mergedWith(typeMismatch(reader, writer, locations));
case INT:
return result.mergedWith(typeMismatch(reader, writer, locations));
case LONG: {
return (writer.getType() == Type.INT) ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case FLOAT: {
return ((writer.getType() == Type.INT) || (writer.getType() == Type.LONG)) ? result
: result.mergedWith(typeMismatch(reader, writer, locations));
}
case DOUBLE: {
return ((writer.getType() == Type.INT) || (writer.getType() == Type.LONG) || (writer.getType() == Type.FLOAT))
? result
: result.mergedWith(typeMismatch(reader, writer, locations));
}
case BYTES: {
return (writer.getType() == Type.STRING) ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case STRING: {
return (isTypeNumeric(writer.getType()) || (writer.getType() == Schema.Type.BYTES)
? result : result.mergedWith(typeMismatch(reader, writer, locations)));
}
case ARRAY:
return result.mergedWith(typeMismatch(reader, writer, locations));
case MAP:
return result.mergedWith(typeMismatch(reader, writer, locations));
case FIXED:
return result.mergedWith(typeMismatch(reader, writer, locations));
case ENUM:
return result.mergedWith(typeMismatch(reader, writer, locations));
case RECORD:
return result.mergedWith(typeMismatch(reader, writer, locations));
case UNION: {
for (final Schema readerBranch : reader.getTypes()) {
SchemaCompatibilityResult compatibility = getCompatibility(readerBranch, writer, locations);
if (compatibility.getCompatibility() == SchemaCompatibilityType.COMPATIBLE) {
return result;
}
}
// No branch in the reader union has been found compatible with the writer
// schema:
String message = String.format("reader union lacking writer type: %s", writer.getType());
return result.mergedWith(SchemaCompatibilityResult
.incompatible(SchemaIncompatibilityType.MISSING_UNION_BRANCH, reader, writer, message, asList(locations)));
}
default: {
throw new AvroRuntimeException("Unknown schema type: " + reader.getType());
}
}
}
} | 3.68 |
framework_Result_ifError | /**
* Applies the {@code consumer} if result is an error.
*
* @param consumer
* consumer to apply in case it's an error
*/
public default void ifError(SerializableConsumer<String> consumer) {
handle(value -> {
}, consumer);
} | 3.68 |
hudi_HoodieParquetInputFormat_initAvroInputFormat | /**
* Spark2 uses `parquet.hadoop.ParquetInputFormat` in `com.twitter:parquet-hadoop-bundle`,
* so we need to distinguish the constructions of classes with
* `parquet.hadoop.ParquetInputFormat` or `org.apache.parquet.hadoop.ParquetInputFormat`.
* If we use `org.apache.parquet:parquet-hadoop`, we can use `HudiAvroParquetInputFormat`
* in Hive or Spark3 to get timestamp with correct type.
*/
private void initAvroInputFormat() {
try {
Constructor[] constructors = ParquetRecordReaderWrapper.class.getConstructors();
if (Arrays.stream(constructors)
.anyMatch(c -> c.getParameterCount() > 0 && c.getParameterTypes()[0]
.getName().equals(ParquetInputFormat.class.getName()))) {
supportAvroRead = true;
}
} catch (SecurityException e) {
throw new HoodieException("Failed to check if support avro reader: " + e.getMessage(), e);
}
} | 3.68 |
framework_AbstractField_createValueChange | /**
* Returns a new value change event instance.
*
* @param oldValue
* the value of this field before this value change event
* @param userOriginated
* {@code true} if this event originates from the client,
* {@code false} otherwise.
* @return the new event
*/
protected ValueChangeEvent<T> createValueChange(T oldValue,
boolean userOriginated) {
return new ValueChangeEvent<>(this, oldValue, userOriginated);
} | 3.68 |
framework_AbstractOrderedLayoutConnector_updateAllSlotListeners | /**
* Add slot listeners
*/
private void updateAllSlotListeners() {
for (ComponentConnector child : getChildComponents()) {
updateSlotListeners(child);
}
} | 3.68 |
framework_AbsoluteLayout_writePositionAttribute | /**
* Private method for writing position attributes
*
* @since 7.4
* @param node
* target node
* @param key
* attribute key
* @param symbol
* value symbol
* @param value
* the value
*/
private void writePositionAttribute(Node node, String key, String symbol,
Float value) {
if (value != null) {
String valueString = DesignAttributeHandler.getFormatter()
.format(value);
node.attr(key, valueString + symbol);
}
} | 3.68 |
hbase_MasterMaintenanceModeTracker_start | /**
* Starts the tracking of whether master is in Maintenance Mode.
*/
public void start() {
watcher.registerListener(this);
update();
} | 3.68 |
flink_AsyncWaitOperator_registerTimer | /** Utility method to register timeout timer. */
private ScheduledFuture<?> registerTimer(
ProcessingTimeService processingTimeService,
long timeout,
ThrowingConsumer<Void, Exception> callback) {
final long timeoutTimestamp = timeout + processingTimeService.getCurrentProcessingTime();
return processingTimeService.registerTimer(
timeoutTimestamp, timestamp -> callback.accept(null));
} | 3.68 |
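The same one-shot timeout registration can be sketched with the JDK's ScheduledExecutorService; note that schedule() takes a relative delay, whereas the ProcessingTimeService above works with absolute timestamps. This is an illustrative stand-in, not Flink's API:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/** Sketch of registering a one-shot timeout with the JDK scheduler. */
public class TimeoutRegistration {
    public static ScheduledFuture<?> registerTimer(ScheduledExecutorService scheduler,
                                                   long timeoutMillis,
                                                   Runnable onTimeout) {
        // relative delay; cancel the returned future if the async result arrives in time
        return scheduler.schedule(onTimeout, timeoutMillis, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        ScheduledFuture<?> timeout =
                registerTimer(scheduler, 100, () -> System.out.println("element timed out"));
        timeout.get(); // wait for the demo timer to fire
        scheduler.shutdown();
    }
}
```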
hadoop_ServiceLauncher_noteException | /**
* Record that an Exit Exception has been raised.
* Save it to {@link #serviceException}, with its exit code in
* {@link #serviceExitCode}
* @param exitException exception
*/
void noteException(ExitUtil.ExitException exitException) {
int exitCode = exitException.getExitCode();
if (exitCode != 0) {
LOG.debug("Exception raised with exit code {}",
exitCode,
exitException);
Throwable cause = exitException.getCause();
if (cause != null) {
// log the nested exception in more detail
LOG.warn("{}", cause.toString(), cause);
}
}
serviceExitCode = exitCode;
serviceException = exitException;
} | 3.68 |
hbase_VersionResource_getClusterVersionResource | /**
* Dispatch to StorageClusterVersionResource
*/
@Path("cluster")
public StorageClusterVersionResource getClusterVersionResource() throws IOException {
return new StorageClusterVersionResource();
} | 3.68 |
pulsar_ManagedLedger_skipNonRecoverableLedger | /**
   * If a ledger is lost, that ledger will be skipped once "autoSkipNonRecoverableData" is enabled, and this
   * method is used to delete the information about that ledger in the ManagedCursor.
*/
default void skipNonRecoverableLedger(long ledgerId){} | 3.68 |
querydsl_BooleanExpression_and | /**
* Create a {@code this && right} expression
*
* <p>Returns an intersection of this and the given expression</p>
*
     * @param right right hand side of the intersection
* @return {@code this && right}
*/
public BooleanExpression and(@Nullable Predicate right) {
right = (Predicate) ExpressionUtils.extract(right);
if (right != null) {
return Expressions.booleanOperation(Ops.AND, mixin, right);
} else {
return this;
}
} | 3.68 |
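A minimal usage sketch of the null-safe behaviour described above; `QCustomer` is the usual Querydsl example query type and is assumed here, not part of the snippet.

```java
QCustomer customer = QCustomer.customer;

// An optional filter that may legitimately be null.
BooleanExpression optionalFilter = null;

// and(null) simply returns the left-hand expression, so the predicate stays valid
// and no null checks are needed at the call site.
BooleanExpression predicate = customer.lastName.eq("Smith").and(optionalFilter);
```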
streampipes_BoilerpipeHTMLContentHandler_endPrefixMapping | // @Override
public void endPrefixMapping(String prefix) throws SAXException {
} | 3.68 |
flink_VoidNamespace_get | /** Getter for the singleton instance. */
public static VoidNamespace get() {
return INSTANCE;
} | 3.68 |
flink_JoinedRowData_replace | /**
* Replaces the {@link RowData} backing this {@link JoinedRowData}.
*
* <p>This method replaces the backing rows in place and does not return a new object. This is
* done for performance reasons.
*/
public JoinedRowData replace(RowData row1, RowData row2) {
this.row1 = row1;
this.row2 = row2;
return this;
} | 3.68 |
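A short sketch of the in-place reuse pattern the javadoc points at, using Flink's `GenericRowData` and `StringData` helpers; the field values are invented.

```java
// One instance is allocated once and reused for every record to avoid per-record garbage.
JoinedRowData joined = new JoinedRowData();

GenericRowData left = GenericRowData.of(1L);
GenericRowData right = GenericRowData.of(StringData.fromString("a"));

// replace() swaps the backing rows in place and returns the same object, not a copy.
RowData view = joined.replace(left, right);
```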
framework_VScrollTable_isEnabled | /**
* Is the cell enabled?
*
     * @return true if enabled, false otherwise
*/
public boolean isEnabled() {
return getParent() != null;
} | 3.68 |
flink_ZooKeeperStateHandleStore_addAndLock | /**
* Creates a state handle, stores it in ZooKeeper and locks it. A locked node cannot be removed
* by another {@link ZooKeeperStateHandleStore} instance as long as this instance remains
* connected to ZooKeeper.
*
* <p><strong>Important</strong>: This will <em>not</em> store the actual state in ZooKeeper,
* but create a state handle and store it in ZooKeeper. This level of indirection makes sure
* that data in ZooKeeper is small.
*
* <p>The operation will fail if there is already a node under the given path.
*
* @param pathInZooKeeper Destination path in ZooKeeper (expected to *not* exist yet)
* @param state State to be added
     * @return The created {@link RetrievableStateHandle}.
* @throws PossibleInconsistentStateException if the write-to-ZooKeeper operation failed. This
* indicates that it's not clear whether the new state was successfully written to ZooKeeper
* or not. Proper error handling has to be applied on the caller's side.
* @throws Exception If a ZooKeeper or state handle operation fails
*/
@Override
public RetrievableStateHandle<T> addAndLock(String pathInZooKeeper, T state)
throws PossibleInconsistentStateException, Exception {
checkNotNull(pathInZooKeeper, "Path in ZooKeeper");
checkNotNull(state, "State");
final String path = normalizePath(pathInZooKeeper);
final Optional<Stat> maybeStat = getStat(path);
if (maybeStat.isPresent()) {
if (isNotMarkedForDeletion(maybeStat.get())) {
throw new AlreadyExistException(
String.format("ZooKeeper node %s already exists.", path));
}
Preconditions.checkState(
releaseAndTryRemove(path),
"The state is marked for deletion and, therefore, should be deletable.");
}
final RetrievableStateHandle<T> storeHandle = storage.store(state);
final byte[] serializedStoreHandle = serializeOrDiscard(storeHandle);
try {
writeStoreHandleTransactionally(path, serializedStoreHandle);
return storeHandle;
} catch (KeeperException.NodeExistsException e) {
// Transactions are not idempotent in the curator version we're currently using, so it
// is actually possible that we've re-tried a transaction that has already succeeded.
// We've ensured that the node hasn't been present prior executing the transaction, so
// we can assume that this is a result of the retry mechanism.
return storeHandle;
} catch (Exception e) {
if (indicatesPossiblyInconsistentState(e)) {
throw new PossibleInconsistentStateException(e);
}
// In case of any other failure, discard the state and rethrow the exception.
storeHandle.discardState();
throw e;
}
} | 3.68 |
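A hedged sketch of caller-side handling for the outcomes the javadoc spells out; `store`, the path and `checkpoint` are placeholders, and the element type is only an example.

```java
try {
    RetrievableStateHandle<CompletedCheckpoint> handle =
            store.addAndLock("/checkpoints/0000000001", checkpoint);
    // Success: ZooKeeper now holds a small pointer node; the actual state lives in the storage helper.
} catch (PossibleInconsistentStateException e) {
    // Unknown whether the node was written; the caller must reconcile on recovery.
} catch (Exception e) {
    // Any other failure: addAndLock() has already discarded the freshly written state handle.
}
```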
hbase_KeyValue_getTagsArray | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getTagsArray() {
return bytes;
} | 3.68 |
hbase_RegionPlacementMaintainer_invertIndices | /**
* Given an array where each element {@code indices[i]} represents the randomized column index
* corresponding to randomized row index {@code i}, create a new array with the corresponding
* inverted indices.
* @param indices an array of transformed indices to be inverted
* @return an array of inverted indices
*/
public int[] invertIndices(int[] indices) {
int[] result = new int[indices.length];
for (int i = 0; i < indices.length; i++) {
result[rowInverse[i]] = colInverse[indices[i]];
}
return result;
} | 3.68 |
framework_LayoutManager_getBorderRight | /**
* Gets the right border of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured right border of the element in pixels.
*/
public int getBorderRight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getBorderRight();
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableNumEntriesActiveMemTable | /** Enables reporting of the total number of entries in the active memtable. */
public void enableNumEntriesActiveMemTable() {
this.properties.add(RocksDBProperty.NumEntriesActiveMemTable.getRocksDBProperty());
} | 3.68 |
flink_AbstractMetricGroup_putVariables | /**
* Enters all variables specific to this {@link AbstractMetricGroup} and their associated values
* into the map.
*
* @param variables map to enter variables and their values into
*/
protected void putVariables(Map<String, String> variables) {} | 3.68 |
hadoop_NvidiaGPUPluginForRuntimeV2_getMajorNumber | // Get major number from device name.
private String getMajorNumber(String devName) {
String output = null;
// output "major:minor" in hex
try {
LOG.debug("Get major numbers from /dev/{}", devName);
output = shellExecutor.getMajorMinorInfo(devName);
String[] strs = output.trim().split(":");
LOG.debug("stat output:{}", output);
output = Integer.toString(Integer.parseInt(strs[0], 16));
} catch (IOException e) {
String msg =
"Failed to get major number from reading /dev/" + devName;
LOG.warn(msg);
} catch (NumberFormatException e) {
LOG.error("Failed to parse device major number from stat output");
output = null;
}
return output;
} | 3.68 |
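A standalone sketch of the hex-to-decimal step applied to the `major:minor` stat output; the sample value is made up.

```java
public class MajorNumberDemo {
    public static void main(String[] args) {
        // stat reports device numbers as "major:minor" in hex, e.g. "c3:0".
        String statOutput = "c3:0";
        String majorHex = statOutput.trim().split(":")[0];
        String major = Integer.toString(Integer.parseInt(majorHex, 16));
        System.out.println(major);   // prints "195"
    }
}
```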
hadoop_TaskManifest_validate | /**
   * Validate the data: fields that must be non-empty must be set.
   * @throws IOException if the data is invalid
   * @return this manifest, for method chaining
*/
public TaskManifest validate() throws IOException {
verify(TYPE.equals(type), "Wrong type: %s", type);
verify(version == VERSION, "Wrong version: %s", version);
validateCollectionClass(extraData.keySet(), String.class);
validateCollectionClass(extraData.values(), String.class);
Set<String> destinations = new HashSet<>(filesToCommit.size());
validateCollectionClass(filesToCommit, FileEntry.class);
for (FileEntry c : filesToCommit) {
c.validate();
verify(!destinations.contains(c.getDest()),
"Destination %s is written to by more than one pending commit",
c.getDest());
destinations.add(c.getDest());
}
return this;
} | 3.68 |
pulsar_PulsarAuthorizationProvider_canLookupAsync | /**
* Check whether the specified role can perform a lookup for the specified topic.
*
* For that the caller needs to have producer or consumer permission.
*
     * @param topicName the topic being looked up
     * @param role the role attempting the lookup
     * @return a future completed with {@code true} if the role has produce or consume permission
* @throws Exception
*/
@Override
public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role,
AuthenticationDataSource authenticationData) {
return canProduceAsync(topicName, role, authenticationData)
.thenCompose(canProduce -> {
if (canProduce) {
return CompletableFuture.completedFuture(true);
}
return canConsumeAsync(topicName, role, authenticationData, null);
});
} | 3.68 |
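A short sketch of consuming the composed future; the provider instance, topic, role and auth data are placeholders.

```java
CompletableFuture<Boolean> allowed = provider.canLookupAsync(
        TopicName.get("persistent://public/default/my-topic"), "client-role", authData);

allowed.thenAccept(ok -> {
    if (!ok) {
        // Neither produce nor consume permission was granted: reject the lookup.
    }
});
```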
hadoop_CachingBlockManager_get | /**
* Gets the block having the given {@code blockNumber}.
*
* @throws IllegalArgumentException if blockNumber is negative.
*/
@Override
public BufferData get(int blockNumber) throws IOException {
checkNotNegative(blockNumber, "blockNumber");
BufferData data;
final int maxRetryDelayMs = bufferPoolSize * 120 * 1000;
final int statusUpdateDelayMs = 120 * 1000;
Retryer retryer = new Retryer(10, maxRetryDelayMs, statusUpdateDelayMs);
boolean done;
do {
if (closed) {
throw new IOException("this stream is already closed");
}
data = bufferPool.acquire(blockNumber);
done = getInternal(data);
if (retryer.updateStatus()) {
LOG.warn("waiting to get block: {}", blockNumber);
LOG.info("state = {}", this.toString());
}
}
while (!done && retryer.continueRetry());
if (done) {
return data;
} else {
String message = String.format("Wait failed for get(%d)", blockNumber);
throw new IllegalStateException(message);
}
} | 3.68 |
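A hedged sketch of a caller looping over prefetched blocks; `blockManager` and `blockCount` are assumed to exist, and `getBuffer`/`release` follow the surrounding prefetch API's conventions rather than being confirmed by the snippet.

```java
for (int block = 0; block < blockCount; block++) {
    BufferData data = blockManager.get(block);
    try {
        ByteBuffer buffer = data.getBuffer();
        // ... consume the buffer contents ...
    } finally {
        // Hand the buffer back so the pool slot can be reused for other blocks.
        blockManager.release(data);
    }
}
```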
morf_ResultSetMismatch_getRightValue | /**
* @return Value from the right hand result set.
*/
public String getRightValue() {
return rightValue;
} | 3.68 |
dubbo_PathUtil_setArgInfoSplitIndex | /**
     * Parses the path-variable split index for each argument from the raw URL path, using annotation info.
     *
     * @param rawPath the raw request path, e.g. "/users/{id}"
     * @param argInfos argument metadata to update with the resolved split index
*/
public static void setArgInfoSplitIndex(String rawPath, List<ArgInfo> argInfos) {
String[] split = rawPath.split(SEPARATOR);
List<PathPair> pathPairs = new ArrayList<>();
for (ArgInfo argInfo : argInfos) {
if (ParamType.PATH.supportAnno(argInfo.getParamAnnotationType())) {
pathPairs.add(new PathPair(argInfo));
}
}
for (int i = 0; i < split.length; i++) {
String s = split[i];
for (PathPair pathPair : pathPairs) {
boolean match = pathPair.match(s);
if (match) {
pathPair.setArgInfoSplitIndex(i);
}
}
}
} | 3.68 |
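A tiny illustration of the index arithmetic only, with a made-up path and `/` assumed for `SEPARATOR`; it does not construct real `ArgInfo` objects.

```java
public class SplitIndexDemo {
    public static void main(String[] args) {
        // "/users/{id}/orders".split("/") -> ["", "users", "{id}", "orders"],
        // so an argument bound to the "{id}" path variable gets split index 2.
        String[] split = "/users/{id}/orders".split("/");
        System.out.println(split[2]);   // prints "{id}"
    }
}
```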
hbase_WALEntryBatch_getWalEntriesWithSize | /** Returns the WAL entries together with their sizes. */
public List<Pair<Entry, Long>> getWalEntriesWithSize() {
return walEntriesWithSize;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_checkContainer | /**
* This should be called from any method that does any modifications to the
* underlying container: it makes sure to put the WASB current version in the
* container's metadata if it's not already there.
*/
private ContainerState checkContainer(ContainerAccessType accessType)
throws StorageException, AzureException {
synchronized (containerStateLock) {
if (isOkContainerState(accessType)) {
return currentKnownContainerState;
}
if (currentKnownContainerState == ContainerState.ExistsAtWrongVersion) {
String containerVersion = retrieveVersionAttribute(container);
throw wrongVersionException(containerVersion);
}
// This means I didn't check it before or it didn't exist or
// we need to stamp the version. Since things may have changed by
// other machines since then, do the check again and don't depend
// on past information.
// Sanity check: we don't expect this at this point.
if (currentKnownContainerState == ContainerState.ExistsAtRightVersion) {
throw new AssertionError("Unexpected state: "
+ currentKnownContainerState);
}
// Download the attributes - doubles as an existence check with just
// one service call
try {
container.downloadAttributes(getInstrumentedContext());
currentKnownContainerState = ContainerState.Unknown;
} catch (StorageException ex) {
if (StorageErrorCodeStrings.CONTAINER_NOT_FOUND.toString()
.equals(ex.getErrorCode())) {
currentKnownContainerState = ContainerState.DoesntExist;
} else {
throw ex;
}
}
if (currentKnownContainerState == ContainerState.DoesntExist) {
// If the container doesn't exist and we intend to write to it,
// create it now.
if (needToCreateContainer(accessType)) {
storeVersionAttribute(container);
container.create(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// The container exists, check the version.
String containerVersion = retrieveVersionAttribute(container);
if (containerVersion != null) {
if (containerVersion.equals(FIRST_WASB_VERSION)) {
// It's the version from when WASB was called ASV, just
// fix the version attribute if needed and proceed.
// We should be good otherwise.
if (needToStampVersion(accessType)) {
storeVersionAttribute(container);
container.uploadMetadata(getInstrumentedContext());
}
} else if (!containerVersion.equals(CURRENT_WASB_VERSION)) {
// Don't know this version - throw.
currentKnownContainerState = ContainerState.ExistsAtWrongVersion;
throw wrongVersionException(containerVersion);
} else {
// It's our correct version.
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// No version info exists.
currentKnownContainerState = ContainerState.ExistsNoVersion;
if (needToStampVersion(accessType)) {
// Need to stamp the version
storeVersionAttribute(container);
container.uploadMetadata(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
}
}
return currentKnownContainerState;
}
} | 3.68 |
framework_MenuBarTooltipsNearEdge_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 12870;
} | 3.68 |
hudi_MercifulJsonConverter_convert | /**
* Converts json to Avro generic record.
* NOTE: if sanitization is needed for avro conversion, the schema input to this method is already sanitized.
* During the conversion here, we sanitize the fields in the data
*
* @param json Json record
* @param schema Schema
*/
public GenericRecord convert(String json, Schema schema) {
try {
Map<String, Object> jsonObjectMap = mapper.readValue(json, Map.class);
return convertJsonToAvro(jsonObjectMap, schema, shouldSanitize, invalidCharMask);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
} | 3.68 |
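A minimal usage sketch with an invented schema and payload; the no-argument constructor is assumed to pick the converter's default mapper.

```java
Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
        + "{\"name\":\"id\",\"type\":\"long\"},"
        + "{\"name\":\"name\",\"type\":\"string\"}]}");

MercifulJsonConverter converter = new MercifulJsonConverter();
GenericRecord record = converter.convert("{\"id\": 1, \"name\": \"alice\"}", schema);
```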