name (string, length 12 to 178) | code_snippet (string, length 8 to 36.5k) | score (float64, 3.26 to 3.68) |
---|---|---|
hadoop_RetryReason_getAbbreviation | /**
* Method to get correct abbreviation for a given set of exception, statusCode,
* storageStatusCode.
*
* @param ex exception caught during server communication.
* @param statusCode statusCode in the server response.
* @param storageErrorMessage storageErrorMessage in the server response.
*
* @return abbreviation for the given set of exception, statusCode, storageStatusCode.
*/
static String getAbbreviation(Exception ex,
Integer statusCode,
String storageErrorMessage) {
String result = null;
for (RetryReasonCategory retryReasonCategory : rankedReasonCategories) {
final String abbreviation
= retryReasonCategory.captureAndGetAbbreviation(ex,
statusCode, storageErrorMessage);
if (abbreviation != null) {
result = abbreviation;
}
}
return result;
} | 3.68 |
hbase_BucketCache_getRAMQueueEntries | /**
* Blocks until elements available in {@code q} then tries to grab as many as possible before
* returning.
* @param receptacle Where to stash the elements taken from queue. We clear before we use it just
* in case.
* @param q The queue to take from.
* @return {@code receptacle} laden with elements taken from the queue or empty if none found.
*/
static List<RAMQueueEntry> getRAMQueueEntries(BlockingQueue<RAMQueueEntry> q,
List<RAMQueueEntry> receptacle) throws InterruptedException {
// Clear sets all entries to null and sets size to 0. We retain allocations. Presume it
// ok even if list grew to accommodate thousands.
receptacle.clear();
receptacle.add(q.take());
q.drainTo(receptacle);
return receptacle;
} | 3.68 |
morf_FieldReference_as | /**
* Specifies the alias to use for the field.
*
* @param aliasName the name of the alias
* @return this
*/
@Override
public Builder as(String aliasName) {
this.alias = aliasName;
return this;
} | 3.68 |
hbase_RegionScanner_getOperationId | /**
* @return The Scanner's {@link org.apache.hadoop.hbase.client.Scan#ID_ATRIBUTE} value, or null if
* not set.
*/
default String getOperationId() {
return null;
} | 3.68 |
hadoop_LoggingAuditor_beforeExecution | /**
* Handle requests made without a real context by logging and
incrementing the failure count.
* Some requests (e.g. copy part) are not expected in spans due
* to how they are executed; these do not trigger failures.
* @param context The current state of the execution, including
* the unmodified SDK request from the service
* client call.
* @param executionAttributes A mutable set of attributes scoped
* to one specific request/response
* cycle that can be used to give data
* to future lifecycle methods.
*/
@Override
public void beforeExecution(Context.BeforeExecution context,
ExecutionAttributes executionAttributes) {
String error = "executing a request outside an audit span "
+ analyzer.analyze(context.request());
final String unaudited = getSpanId() + " "
+ UNAUDITED_OPERATION + " " + error;
if (isRequestNotAlwaysInSpan(context.request())) {
// can get by auditing during a copy, so don't overreact
LOG.debug(unaudited);
} else {
final RuntimeException ex = new AuditFailureException(unaudited);
LOG.debug(unaudited, ex);
if (rejectOutOfSpan) {
throw ex;
}
}
// now hand off to the superclass for its normal preparation
super.beforeExecution(context, executionAttributes);
} | 3.68 |
druid_SQLCommitStatement_getTransactionName | // sql server
public SQLExpr getTransactionName() {
return transactionName;
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredIntegerParameter | /**
* Defines a number-based configuration parameter of type integer provided by pipeline developers at pipeline
* authoring time and initializes the parameter with a default value.
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a user-friendly manner.
* @param defaultValue The default integer value.
* @return this
*/
public K requiredIntegerParameter(Label label,
Integer defaultValue) {
FreeTextStaticProperty fsp = prepareFreeTextStaticProperty(label,
XSD.INTEGER.toString());
fsp.setValue(String.valueOf(defaultValue));
this.staticProperties.add(fsp);
return me();
} | 3.68 |
morf_AbstractSelectStatement_from | /**
* Selects fields from one or more inner selects:
*
* <blockquote><pre>
* SelectStatement statement = select().from(select().from("Foo"));
* </pre></blockquote>
*
* @param fromSelect the select statements to select from
* @return a new select statement with the change applied.
*/
public T from(SelectStatement... fromSelect) {
return copyOnWriteOrMutate(
b -> b.from(fromSelect),
() -> this.fromSelects.addAll(Arrays.asList(fromSelect))
);
} | 3.68 |
dubbo_URLParam_hasMethodParameter | /**
* Whether any parameter matches the given method name
*
* @param method method name
* @return true if such a parameter exists, false otherwise
*/
public boolean hasMethodParameter(String method) {
if (method == null) {
return false;
}
String methodsString = getParameter(METHODS_KEY);
if (StringUtils.isNotEmpty(methodsString)) {
if (!methodsString.contains(method)) {
return false;
}
}
for (Map.Entry<String, Map<String, String>> methods : METHOD_PARAMETERS.entrySet()) {
if (methods.getValue().containsKey(method)) {
return true;
}
}
return false;
} | 3.68 |
framework_HierarchicalDataCommunicator_getItemCollapseAllowedProvider | /**
* Gets the item collapse allowed provider.
*
* @return the item collapse allowed provider
*/
public ItemCollapseAllowedProvider<T> getItemCollapseAllowedProvider() {
return itemCollapseAllowedProvider;
} | 3.68 |
zxing_BitMatrix_getHeight | /**
* @return The height of the matrix
*/
public int getHeight() {
return height;
} | 3.68 |
hadoop_HostnameFilter_init | /**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
} | 3.68 |
hudi_HoodieFileGroupReader_hasNext | /**
* @return {@code true} if the next record exists; {@code false} otherwise.
* @throws IOException on reader error.
*/
public boolean hasNext() throws IOException {
return recordBuffer.hasNext();
} | 3.68 |
hadoop_FederationStateStoreFacade_getInstance | /**
* Returns the singleton instance of the FederationStateStoreFacade object.
*
* @param conf configuration.
* @return the singleton {@link FederationStateStoreFacade} instance
*/
public static FederationStateStoreFacade getInstance(Configuration conf) {
return getInstanceInternal(conf);
} | 3.68 |
flink_CsvOutputFormat_setInputType | /**
* The purpose of this method is solely to check whether the data type to be processed is in
* fact a tuple type.
*/
@Override
public void setInputType(TypeInformation<?> type, ExecutionConfig executionConfig) {
if (!type.isTupleType()) {
throw new InvalidProgramException(
"The "
+ CsvOutputFormat.class.getSimpleName()
+ " can only be used to write tuple data sets.");
}
} | 3.68 |
hbase_DefaultEnvironmentEdge_currentTime | /**
* {@inheritDoc}
* <p>
* This implementation returns {@link System#currentTimeMillis()}
* </p>
*/
@Override
public long currentTime() {
return System.currentTimeMillis();
} | 3.68 |
shardingsphere-elasticjob_GuaranteeService_isAllStarted | /**
* Judge whether job's sharding items are all started.
*
* @return job's sharding items are all started or not
*/
public boolean isAllStarted() {
return jobNodeStorage.isJobNodeExisted(GuaranteeNode.STARTED_ROOT)
&& configService.load(false).getShardingTotalCount() == jobNodeStorage.getJobNodeChildrenKeys(GuaranteeNode.STARTED_ROOT).size();
} | 3.68 |
hadoop_EventColumnName_getColumnQualifier | /**
* @return a byte array with each components/fields separated by
* Separator#VALUES. This leads to an event column name of the form
* eventId=timestamp=infokey. If both timestamp and infokey are null,
* then a qualifier of the form eventId=timestamp= is returned. If
* only infokey is null, then a qualifier of the form eventId= is
* returned. These prefix forms are useful for queries that intend to
* retrieve more than one specific column name.
*/
public byte[] getColumnQualifier() {
return eventColumnNameConverter.encode(this);
} | 3.68 |
framework_VDebugWindow_getMillisSinceStart | /**
* Gets the milliseconds since application start.
*
* @return the number of milliseconds elapsed since application start
*/
static int getMillisSinceStart() {
return START.elapsedMillis();
} | 3.68 |
framework_JsonPaintTarget_toXmlChar | /**
* Substitutes an XML-sensitive character with a predefined XML entity.
*
* @param c
* the Character to be replaced with an entity.
* @return String of the entity or null if character is not to be replaced
* with an entity.
*/
private static String toXmlChar(char c) {
switch (c) {
case '&':
return "&amp;"; // & => &amp;
case '>':
return "&gt;"; // > => &gt;
case '<':
return "&lt;"; // < => &lt;
case '"':
return "&quot;"; // " => &quot;
case '\'':
return "&apos;"; // ' => &apos;
default:
return null;
}
} | 3.68 |
flink_CopyOnWriteStateMap_doubleCapacity | /**
* Doubles the capacity of the hash table. Existing entries are placed in the correct bucket on
the enlarged table. If the current capacity is MAXIMUM_CAPACITY, this method is a no-op.
The doubled table becomes the new incremental rehash target unless we were already at MAXIMUM_CAPACITY.
*/
private void doubleCapacity() {
// There can only be one rehash in flight. From the amount of incremental rehash steps we
// take, this should always hold.
Preconditions.checkState(!isRehashing(), "There is already a rehash in progress.");
StateMapEntry<K, N, S>[] oldMap = primaryTable;
int oldCapacity = oldMap.length;
if (oldCapacity == MAXIMUM_CAPACITY) {
return;
}
incrementalRehashTable = makeTable(oldCapacity * 2);
} | 3.68 |
flink_DataSet_collect | /**
* Convenience method to get the elements of a DataSet as a List. As DataSet can contain a lot
* of data, this method should be used with caution.
*
* @return A List containing the elements of the DataSet
*/
public List<T> collect() throws Exception {
final String id = new AbstractID().toString();
final TypeSerializer<T> serializer =
getType().createSerializer(getExecutionEnvironment().getConfig());
this.output(new Utils.CollectHelper<>(id, serializer)).name("collect()");
JobExecutionResult res = getExecutionEnvironment().execute();
ArrayList<byte[]> accResult = res.getAccumulatorResult(id);
if (accResult != null) {
try {
return SerializedListAccumulator.deserializeList(accResult, serializer);
} catch (ClassNotFoundException e) {
throw new RuntimeException("Cannot find type class of collected data type.", e);
} catch (IOException e) {
throw new RuntimeException(
"Serialization error while deserializing collected data", e);
}
} else {
throw new RuntimeException("The call to collect() could not retrieve the DataSet.");
}
} | 3.68 |
hbase_Scan_setMvccReadPoint | /**
* Set the mvcc read point used to open a scanner.
*/
Scan setMvccReadPoint(long mvccReadPoint) {
this.mvccReadPoint = mvccReadPoint;
return this;
} | 3.68 |
flink_AbstractAutoCloseableRegistry_registerCloseable | /**
* Registers a {@link AutoCloseable} with the registry. In case the registry is already closed,
* this method throws an {@link IOException} and closes the passed {@link
* AutoCloseable}.
*
* @param closeable Closeable to register.
* @throws IOException exception when the registry was closed before.
*/
public final void registerCloseable(C closeable) throws IOException {
if (null == closeable) {
return;
}
synchronized (getSynchronizationLock()) {
if (!closed) {
doRegister(closeable, closeableToRef);
return;
}
}
IOUtils.closeQuietly(closeable);
throw new IOException(
"Cannot register Closeable, registry is already closed. Closing argument.");
} | 3.68 |
hadoop_FlowRunCoprocessor_postScannerOpen | /*
* (non-Javadoc)
*
* Creates a {@link FlowScanner} Scan so that it can correctly process the
* contents of {@link FlowRunTable}.
*
* @see
* org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#postScannerOpen(
* org.apache.hadoop.hbase.coprocessor.ObserverContext,
* org.apache.hadoop.hbase.client.Scan,
* org.apache.hadoop.hbase.regionserver.RegionScanner)
*/
@Override
public RegionScanner postScannerOpen(
ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
RegionScanner scanner) throws IOException {
return new FlowScanner(e.getEnvironment(), scan,
scanner, FlowScannerOperation.READ);
} | 3.68 |
framework_ContainerEventProvider_setStartDateProperty | /**
* Set the property which provides the starting date and time of the event.
*/
public void setStartDateProperty(Object startDateProperty) {
this.startDateProperty = startDateProperty;
} | 3.68 |
hbase_DependentColumnFilter_getFamily | /** Returns the column family */
public byte[] getFamily() {
return this.columnFamily;
} | 3.68 |
framework_Table_getFirstUpdatedItemIndex | /**
* Subclass and override this to enable partial row updates, bypassing the
* normal caching and lazy loading mechanism. This is useful for updating
* the state of certain rows, e.g. in the TreeTable the collapsed state of a
* single node is updated using this mechanism.
*
* @return the index of the first item to be updated. For plain Table it is
* always 0.
*/
protected int getFirstUpdatedItemIndex() {
return 0;
} | 3.68 |
pulsar_LoadSimulationController_process | // Update the message rate information for the bundles in a recently changed load report.
public synchronized void process(final WatchedEvent event) {
try {
// Get the load report and put this back as a watch.
final LoadReport loadReport = ObjectMapperFactory.getMapper().getObjectMapper()
.readValue(zkClient.getData(path, this, null), LoadReport.class);
for (final Map.Entry<String, NamespaceBundleStats> entry : loadReport.getBundleStats().entrySet()) {
final String bundle = entry.getKey();
final String namespace = bundle.substring(0, bundle.lastIndexOf('/'));
final String topic = String.format("%s/%s", namespace, "t");
final NamespaceBundleStats stats = entry.getValue();
// Approximate total message rate via average between in/out.
final double messageRate = arguments.rateMultiplier * (stats.msgRateIn + stats.msgRateOut) / 2;
// size = throughput / rate.
final int messageSize = (int) Math.ceil(arguments.rateMultiplier
* (stats.msgThroughputIn + stats.msgThroughputOut) / (2 * messageRate));
arguments.rate = messageRate;
arguments.size = messageSize;
// Try to modify the topic if it already exists. Otherwise, create it.
changeOrCreate(arguments, topic);
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectEvery | /**
* Tests select statement with Every function.
*/
@Test
public void testSelectEvery() {
SelectStatement statement = select(every(field(BOOLEAN_FIELD))).from(tableRef(TEST_TABLE));
assertEquals("Select scripts are not the same", expectedSelectEvery(), testDialect.convertStatementToSQL(statement));
} | 3.68 |
hadoop_RenameFilesStage_commitOneFile | /**
* Commit one file by rename, then, if that doesn't fail,
* add to the files committed list.
* @param entry entry to commit.
* @throws IOException failure.
*/
private void commitOneFile(FileEntry entry) throws IOException {
updateAuditContext(OP_STAGE_JOB_RENAME_FILES);
// report progress back
progress();
// if the dest dir is to be deleted,
// look to see if the parent dir was created.
// if it was. we know that the file doesn't exist.
final boolean deleteDest = getStageConfig().getDeleteTargetPaths()
&& !createdDirectories.contains(entry.getDestPath().getParent());
// do the rename
commitFile(entry, deleteDest);
// update the list and IOStats
synchronized (this) {
filesCommitted.add(entry);
totalFileSize += entry.getSize();
}
} | 3.68 |
framework_VComboBox_inputFieldKeyDown | /**
* Triggered when a key is pressed in the text box
*
* @param event
* The KeyDownEvent
*/
private void inputFieldKeyDown(KeyDownEvent event) {
if (enableDebug) {
debug("VComboBox: inputFieldKeyDown(" + event.getNativeKeyCode()
+ ")");
}
switch (event.getNativeKeyCode()) {
case KeyCodes.KEY_DOWN:
case KeyCodes.KEY_UP:
case KeyCodes.KEY_PAGEDOWN:
case KeyCodes.KEY_PAGEUP:
// open popup as from gadget
filterOptions(-1, "");
tb.selectAll();
dataReceivedHandler.popupOpenerClicked();
break;
case KeyCodes.KEY_ENTER:
/*
* This only handles the case when new items is allowed, a text is
* entered, the popup opener button is clicked to close the popup
* and enter is then pressed (see #7560).
*/
if (!allowNewItems) {
return;
}
if (currentSuggestion != null && tb.getText()
.equals(currentSuggestion.getReplacementString())) {
// Retain behavior from #6686 by returning without stopping
// propagation if there's nothing to do
return;
}
dataReceivedHandler.reactOnInputWhenReady(tb.getText());
suggestionPopup.hide();
event.stopPropagation();
break;
}
} | 3.68 |
hbase_QuotaSettingsFactory_removeTableSpaceLimit | /**
* Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given
* table.
* @param tableName The name of the table to remove the quota for.
* @return A {@link QuotaSettings} object.
*/
public static QuotaSettings removeTableSpaceLimit(TableName tableName) {
return new SpaceLimitSettings(tableName);
} | 3.68 |
hudi_GcsObjectMetadataFetcher_applyFilter | /**
* @param cloudObjectMetadataDF a Dataset that contains metadata of GCS objects. Assumed to be a persisted form
* of a Cloud Storage Pubsub Notification event.
* @return Dataset<Row> after applying the filter.
*/
public Dataset<Row> applyFilter(Dataset<Row> cloudObjectMetadataDF) {
String filter = createFilter();
LOG.info("Adding filter string to Dataset: " + filter);
return cloudObjectMetadataDF.filter(filter);
} | 3.68 |
hbase_Bytes_set | /** Use passed bytes as backing array for this instance. */
public void set(final byte[] b, final int offset, final int length) {
this.bytes = b;
this.offset = offset;
this.length = length;
} | 3.68 |
pulsar_TopicsBase_addSchema | // Add a new schema to schema registry for a topic
private CompletableFuture<SchemaVersion> addSchema(SchemaData schemaData) {
// Only need to add the schema to the first partition the broker owns, since the schema id in the
// schema registry is the same for all partitions (it is the partitionedTopicName)
List<Integer> partitions = pulsar().getBrokerService().getOwningTopics()
.get(topicName.getPartitionedTopicName()).values();
CompletableFuture<SchemaVersion> result = new CompletableFuture<>();
for (int index = 0; index < partitions.size(); index++) {
CompletableFuture<SchemaVersion> future = new CompletableFuture<>();
String topicPartitionName = topicName.getPartition(partitions.get(index)).toString();
pulsar().getBrokerService().getTopic(topicPartitionName, false)
.thenAccept(topic -> {
if (!topic.isPresent()) {
future.completeExceptionally(new BrokerServiceException.TopicNotFoundException(
"Topic " + topicPartitionName + " not found"));
} else {
topic.get().addSchema(schemaData).thenAccept(schemaVersion -> future.complete(schemaVersion))
.exceptionally(exception -> {
future.completeExceptionally(exception);
return null;
});
}
});
try {
result.complete(future.get());
break;
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug("Fail to add schema to topic " + topicName.getPartitionedTopicName()
+ " for partition " + partitions.get(index) + " for REST produce request.");
}
}
}
// Not able to add schema to any partition
if (!result.isDone()) {
result.completeExceptionally(new SchemaException("Unable to add schema " + schemaData
+ " to topic " + topicName.getPartitionedTopicName()));
}
return result;
} | 3.68 |
hadoop_DataJoinReducerBase_joinAndCollect | /**
* Perform the actual join recursively.
*
* @param tags
* a list of input tags
* @param values
* a list of value lists, each corresponding to one input source
* @param pos
* indicating the next value list to be joined
* @param partialList
* a list of values, each from one value list considered so far.
* @param key
* @param output
* @throws IOException
*/
private void joinAndCollect(Object[] tags, ResetableIterator[] values,
int pos, Object[] partialList, Object key,
OutputCollector output, Reporter reporter) throws IOException {
if (values.length == pos) {
// get a value from each source. Combine them
TaggedMapOutput combined = combine(tags, partialList);
collect(key, combined, output, reporter);
return;
}
ResetableIterator nextValues = values[pos];
nextValues.reset();
while (nextValues.hasNext()) {
Object v = nextValues.next();
partialList[pos] = v;
joinAndCollect(tags, values, pos + 1, partialList, key, output, reporter);
}
} | 3.68 |
hadoop_IOStatisticsLogging_mapToString | /**
* Given a map, add its entryset to the string.
* The entries are only sorted if the source entryset
* iterator is sorted, such as from a TreeMap.
* @param sb string buffer to append to
* @param type type (for output)
* @param map map to evaluate
* @param separator separator
* @param <E> type of values of the map
*/
private static <E> void mapToString(StringBuilder sb,
final String type,
final Map<String, E> map,
final String separator) {
int count = 0;
sb.append(type);
sb.append("=(");
for (Map.Entry<String, E> entry : map.entrySet()) {
if (count > 0) {
sb.append(separator);
}
count++;
sb.append(IOStatisticsBinding.entryToString(
entry.getKey(), entry.getValue()));
}
sb.append(");\n");
} | 3.68 |
framework_Overlay_setZIndex | /**
* Set the z-index (visual stack position) for this overlay.
*
* @param zIndex
* The new z-index
*/
protected void setZIndex(int zIndex) {
getElement().getStyle().setZIndex(zIndex);
} | 3.68 |
graphhopper_JaroWinkler_similarity | /**
* Compute JW similarity.
*/
public final double similarity(final String s1, final String s2) {
int[] mtp = matches(s1, s2);
float m = mtp[0];
if (m == 0) {
return 0f;
}
double j = ((m / s1.length() + m / s2.length() + (m - mtp[1]) / m))
/ THREE;
double jw = j;
if (j > getThreshold()) {
jw = j + Math.min(JW_COEF, 1.0 / mtp[THREE]) * mtp[2] * (1 - j);
}
return jw;
} | 3.68 |
morf_OracleDialect_getSqlForDaysBetween | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDaysBetween(org.alfasoftware.morf.sql.element.AliasedField, org.alfasoftware.morf.sql.element.AliasedField)
*/
@Override
protected String getSqlForDaysBetween(AliasedField toDate, AliasedField fromDate) {
return String.format("(%s) - (%s)", getSqlFrom(toDate), getSqlFrom(fromDate));
} | 3.68 |
hibernate-validator_ValidationProviderHelper_isDefaultProvider | /**
* Whether the given provider is the default provider or not.
*
* @return {@code true} if the given provider is the default provider, {@code false} otherwise
*/
public boolean isDefaultProvider() {
return isDefaultProvider;
} | 3.68 |
flink_Explainable_explain | /**
* Returns the AST of this object and the execution plan to compute the result of the given
* statement.
*
* @param extraDetails The extra explain details which the result of this method should include,
* e.g. estimated cost, changelog mode for streaming
* @return AST and the execution plan.
*/
default String explain(ExplainDetail... extraDetails) {
return explain(ExplainFormat.TEXT, extraDetails);
} | 3.68 |
framework_IndexedContainer_addDefaultValues | /**
* Helper method to add default values for items if available
*
* @param t
* data table of added item
*/
private void addDefaultValues(Map<Object, Object> t) {
if (defaultPropertyValues != null) {
for (Object key : defaultPropertyValues.keySet()) {
t.put(key, defaultPropertyValues.get(key));
}
}
} | 3.68 |
framework_VComboBox_isrelativeUnits | /**
* @since 7.7
* @param suggestionPopupWidth the configured popup width
* @return true if the width is given in relative units, i.e. ends with '%'
*/
private boolean isrelativeUnits(String suggestionPopupWidth) {
return suggestionPopupWidth.trim().endsWith("%");
} | 3.68 |
pulsar_ResourceGroupService_getResourceGroupInternal | /**
* Get the RG with the given name. For internal operations only.
*/
private ResourceGroup getResourceGroupInternal(String resourceGroupName) {
if (resourceGroupName == null) {
throw new IllegalArgumentException("Invalid null resource group name: " + resourceGroupName);
}
return resourceGroupsMap.get(resourceGroupName);
} | 3.68 |
framework_HierarchyMapper_setFilter | /**
* Sets the current filter. This will cause the hierarchy to be constructed
* again.
*
* @param filter
* the filter
*/
public void setFilter(Object filter) {
this.filter = (F) filter;
} | 3.68 |
framework_NullValidator_setNullAllowed | /**
* Sets if nulls (and only nulls) are to be allowed.
*
* @param onlyNullAllowed
* If true, only nulls are allowed. If false, only non-nulls are
allowed.
*/
public void setNullAllowed(boolean onlyNullAllowed) {
this.onlyNullAllowed = onlyNullAllowed;
} | 3.68 |
hbase_IndexBlockEncoding_getNameInBytes | /** Returns name converted to bytes. */
public byte[] getNameInBytes() {
return Bytes.toBytes(toString());
} | 3.68 |
hbase_AbstractByteRange_getVLong | // Copied from com.google.protobuf.CodedInputStream v2.5.0 readRawVarint64
@Override
public long getVLong(int index) {
int shift = 0;
long result = 0;
while (shift < 64) {
final byte b = get(index++);
result |= (long) (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
break;
}
shift += 7;
}
return result;
} | 3.68 |
framework_SetPageFirstItemLoadsNeededRowsOnly_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final VerticalLayout layout = new VerticalLayout();
addComponent(layout);
final Label label = new Label("");
addComponent(label);
BeanContainer<String, Bean> beans = new BeanContainer<String, Bean>(
Bean.class) {
@Override
public List<String> getItemIds(int startIndex, int numberOfIds) {
label.setValue("rows requested: " + numberOfIds);
return super.getItemIds(startIndex, numberOfIds);
}
};
beans.setBeanIdProperty("i");
for (int i = 0; i < 2000; i++) {
beans.addBean(new Bean(i));
}
final Table table = new Table("Beans", beans);
table.setVisibleColumns("i");
layout.addComponent(table);
table.setCurrentPageFirstItemIndex(table.size() - 1);
} | 3.68 |
pulsar_ClientBuilderImpl_description | /**
* Set the description.
*
* <p> By default, when the client connects to the broker, a version string like "Pulsar-Java-v<x.y.z>" will be
* carried and saved by the broker. The client version string could be queried from the topic stats.
*
* <p> This method provides a way to add more description to a specific PulsarClient instance. If it's configured,
* the description will be appended to the original client version string, with '-' as the separator.
*
* <p>For example, if the client version is 3.0.0, and the description is "forked", the final client version string
* will be "Pulsar-Java-v3.0.0-forked".
*
* @param description the description of the current PulsarClient instance
* @throws IllegalArgumentException if the length of description exceeds 64
*/
public ClientBuilder description(String description) {
if (description != null && description.length() > 64) {
throw new IllegalArgumentException("description should be at most 64 characters");
}
conf.setDescription(description);
return this;
} | 3.68 |
dubbo_NacosConfigServiceWrapper_handleInnerSymbol | /**
* see {@link com.alibaba.nacos.client.config.utils.ParamUtils#isValid(java.lang.String)}
*/
private String handleInnerSymbol(String data) {
if (data == null) {
return null;
}
return data.replace(INNERCLASS_SYMBOL, INNERCLASS_COMPATIBLE_SYMBOL).replace(SLASH_CHAR, HYPHEN_CHAR);
} | 3.68 |
morf_ResolvedTables_getModifiedTables | /**
* @return Unmodifiable set of modified tables
*/
public Set<String> getModifiedTables() {
return Collections.unmodifiableSet(modifiedTables);
} | 3.68 |
hbase_FsDelegationToken_getRenewer | /** Returns the account name that is allowed to renew the token. */
public String getRenewer() {
return renewer;
} | 3.68 |
hadoop_TimelinePutResponse_addErrors | /**
* Add a list of {@link TimelinePutError} instances into the existing list
*
* @param errors
* a list of {@link TimelinePutError} instances
*/
public void addErrors(List<TimelinePutError> errors) {
this.errors.addAll(errors);
} | 3.68 |
hmily_UpdateStatementAssembler_assembleHmilyUpdateStatement | /**
* Assemble Hmily update statement.
*
* @param updateStatement update statement
* @param hmilyUpdateStatement hmily update statement
* @return hmily update statement
*/
public static HmilyUpdateStatement assembleHmilyUpdateStatement(final UpdateStatement updateStatement, final HmilyUpdateStatement hmilyUpdateStatement) {
HmilySimpleTableSegment hmilySimpleTableSegment = CommonAssembler.assembleHmilySimpleTableSegment((SimpleTableSegment) updateStatement.getTableSegment());
HmilySetAssignmentSegment hmilySetAssignmentSegment = assembleHmilySetAssignmentSegment(updateStatement.getSetAssignment());
HmilyWhereSegment hmilyWhereSegment = null;
if (updateStatement.getWhere().isPresent()) {
hmilyWhereSegment = assembleHmilyWhereSegment(updateStatement.getWhere().get());
}
hmilyUpdateStatement.setTableSegment(hmilySimpleTableSegment);
hmilyUpdateStatement.setSetAssignment(hmilySetAssignmentSegment);
hmilyUpdateStatement.setWhere(hmilyWhereSegment);
return hmilyUpdateStatement;
} | 3.68 |
hbase_LogEventHandler_getNamedQueueRecords | /**
* Retrieve in memory queue records from ringbuffer
* @param request namedQueue request with event type
* @return queue records from ringbuffer after filter (if applied)
*/
NamedQueueGetResponse getNamedQueueRecords(NamedQueueGetRequest request) {
return namedQueueServices.get(request.getNamedQueueEvent()).getNamedQueueRecords(request);
} | 3.68 |
morf_SqlUtils_caseStatement | /**
* Builder method for {@link CaseStatement}.
*
* <p>
* Example:
* </p>
*
* <pre>
* caseStatement(when(eq(field("receiptType"), literal("R"))).then(literal("Receipt")),
* when(eq(field("receiptType"), literal("S"))).then(literal("Agreement Suspense")),
* when(eq(field("receiptType"), literal("T"))).then(literal("General Suspense")))
* .otherwise(literal("UNKNOWN"))
* </pre>
*
* @see #when(Criterion)
*
* @param whenClauses the {@link WhenCondition} portions of the case statement
* @return A builder to create a {@link CaseStatement}.
*/
public static CaseStatementBuilder caseStatement(Iterable<? extends WhenCondition> whenClauses) {
return new CaseStatementBuilder(whenClauses);
} | 3.68 |
flink_BulkIterationBase_validate | /** @throws InvalidProgramException */
public void validate() throws InvalidProgramException {
if (this.input == null) {
throw new RuntimeException("Operator for initial partial solution is not set.");
}
if (this.iterationResult == null) {
throw new InvalidProgramException(
"Operator producing the next version of the partial "
+ "solution (iteration result) is not set.");
}
if (this.terminationCriterion == null && this.numberOfIterations <= 0) {
throw new InvalidProgramException(
"No termination condition is set "
+ "(neither fix number of iteration nor termination criterion).");
}
} | 3.68 |
flink_FileSystem_createRecoverableWriter | /**
* Creates a new {@link RecoverableWriter}. A recoverable writer creates streams that can
* persist and recover their intermediate state. Persisting and recovering intermediate state is
* a core building block for writing to files that span multiple checkpoints.
*
* <p>The returned object can act as a shared factory to open and recover multiple streams.
*
* <p>This method is optional on file systems and various file system implementations may not
* support this method, throwing an {@code UnsupportedOperationException}.
*
* @return A RecoverableWriter for this file system.
* @throws IOException Thrown, if the recoverable writer cannot be instantiated.
*/
public RecoverableWriter createRecoverableWriter() throws IOException {
throw new UnsupportedOperationException(
"This file system does not support recoverable writers.");
} | 3.68 |
rocketmq-connect_JsonSchemaData_fromJsonSchema | /**
* Converts a Connect schema into its JSON schema representation.
*
* @param schema the Connect schema
* @return the corresponding JSON schema
*/
public org.everit.json.schema.Schema fromJsonSchema(Schema schema) {
return rawSchemaFromConnectSchema(schema);
} | 3.68 |
hbase_MultiTableHFileOutputFormat_configureIncrementalLoad | /**
* Analogous to
* {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, this
function will configure the requisite number of reducers to write HFiles for multiple tables
* simultaneously
* @param job See {@link org.apache.hadoop.mapreduce.Job}
* @param multiTableDescriptors Table descriptor and region locator pairs
*/
public static void configureIncrementalLoad(Job job, List<TableInfo> multiTableDescriptors)
throws IOException {
MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors,
MultiTableHFileOutputFormat.class);
} | 3.68 |
framework_Form_setFooter | /**
* Sets the layout that is rendered below normal form contents. No footer is
rendered if this is set to null.
*
* @param footer
* the new footer layout
*/
public void setFooter(Layout footer) {
if (getFooter() != null) {
getFooter().setParent(null);
}
getState().footer = footer;
if (footer != null) {
footer.setParent(this);
}
} | 3.68 |
framework_DateField_paintContent | /*
* Paints this component. Don't add a JavaDoc comment here, we use the
* default documentation from implemented interface.
*/
@Override
public void paintContent(PaintTarget target) throws PaintException {
// Adds the locale as attribute
final Locale l = getLocale();
if (l != null) {
target.addAttribute("locale", l.toString());
}
if (getDateFormat() != null) {
target.addAttribute("format", dateFormat);
}
if (!isLenient()) {
target.addAttribute("strict", true);
}
target.addAttribute(DateFieldConstants.ATTR_WEEK_NUMBERS,
isShowISOWeekNumbers());
target.addAttribute("parsable", uiHasValidDateString);
/*
* TODO communicate back the invalid date string? E.g. returning back to
* app or refresh.
*/
// Gets the calendar
final Calendar calendar = getCalendar();
final Date currentDate = getValue();
// Only paint variables for the resolution and up, e.g. Resolution DAY
// paints DAY,MONTH,YEAR
for (Resolution res : Resolution
.getResolutionsHigherOrEqualTo(resolution)) {
int value = -1;
if (currentDate != null) {
value = calendar.get(res.getCalendarField());
if (res == Resolution.MONTH) {
// Calendar month is zero based
value++;
}
}
target.addVariable(this, variableNameForResolution.get(res), value);
}
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setCompressionType | /**
* Compression types supported in hbase. LZO is not bundled as part of the hbase distribution.
* See <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a> for
* how to enable it.
* @param type Compression type setting.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) {
return setValue(COMPRESSION_BYTES, type.name());
} | 3.68 |
rocketmq-connect_MetricsReporter_onMeterRemoved | /**
* Called when a {@link Meter} is removed from the registry.
*
* @param name the meter's name
*/
public void onMeterRemoved(String name) {
this.onMeterRemoved(MetricUtils.stringToMetricName(name));
} | 3.68 |
hadoop_ManifestStoreOperationsThroughFileSystem_isFile | /**
* Using FileSystem.isFile to offer stores the option to optimize their probes.
* @param path path to probe
* @return true if the path resolves to a file.
* @throws IOException IO failure.
*/
@SuppressWarnings("deprecation")
@Override
public boolean isFile(Path path) throws IOException {
return fileSystem.isFile(path);
} | 3.68 |
graphhopper_GraphHopper_hasElevation | /**
* @return true if storing and fetching elevation data is enabled. Default is false
*/
public boolean hasElevation() {
return elevation;
} | 3.68 |
hadoop_AbstractRESTRequestInterceptor_setNextInterceptor | /**
* Sets the {@link RESTRequestInterceptor} in the chain.
*/
@Override
public void setNextInterceptor(RESTRequestInterceptor nextInterceptor) {
this.nextInterceptor = nextInterceptor;
}
/**
* Sets the {@link Configuration} | 3.68 |
zxing_GeoParsedResult_getAltitude | /**
* @return altitude in meters. If not specified in the geo URI, returns 0.0
*/
public double getAltitude() {
return altitude;
} | 3.68 |
hudi_DirectWriteMarkers_doesMarkerDirExist | /**
* @return {@code true} if marker directory exists; {@code false} otherwise.
* @throws IOException
*/
public boolean doesMarkerDirExist() throws IOException {
return fs.exists(markerDirPath);
} | 3.68 |
hadoop_WordList_indexOf | /**
* Returns the index of the specified word in the list.
*/
public int indexOf(String word) {
return list.get(word);
} | 3.68 |
hudi_HoodieHeartbeatClient_start | /**
* Start a new heartbeat for the specified instant. If there is already one running, this will be a NO_OP
*
* @param instantTime The instant time for the heartbeat.
*/
public void start(String instantTime) {
LOG.info("Received request to start heartbeat for instant time " + instantTime);
Heartbeat heartbeat = instantToHeartbeatMap.get(instantTime);
ValidationUtils.checkArgument(heartbeat == null || !heartbeat.isHeartbeatStopped(), "Cannot restart a stopped heartbeat for " + instantTime);
if (heartbeat != null && heartbeat.isHeartbeatStarted()) {
// heartbeat already started, NO_OP
} else {
Heartbeat newHeartbeat = new Heartbeat();
newHeartbeat.setHeartbeatStarted(true);
instantToHeartbeatMap.put(instantTime, newHeartbeat);
// Ensure heartbeat is generated for the first time with this blocking call.
// Since timer submits the task to a thread, no guarantee when that thread will get CPU
// cycles to generate the first heartbeat.
updateHeartbeat(instantTime);
newHeartbeat.getTimer().scheduleAtFixedRate(new HeartbeatTask(instantTime), this.heartbeatIntervalInMs,
this.heartbeatIntervalInMs);
}
} | 3.68 |
framework_ButtonRenderer_setHtmlContentAllowed | /**
* Sets whether the data should be rendered as HTML (instead of text).
* <p>
* By default everything is rendered as text.
*
* @param htmlContentAllowed
* <code>true</code> to render as HTML, <code>false</code> to
* render as text
*
* @since 8.0.3
*/
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
getState().htmlContentAllowed = htmlContentAllowed;
} | 3.68 |
hadoop_DynoInfraUtils_waitForNameNodeStartup | /**
* Wait for the launched NameNode to finish starting up. Continues until
* startup completes or {@code shouldExit} returns true.
*
* @param nameNodeProperties The set of properties containing information
* about the NameNode.
* @param shouldExit Should return true iff this should stop waiting.
* @param log Where to log information.
*/
static void waitForNameNodeStartup(Properties nameNodeProperties,
Supplier<Boolean> shouldExit, Logger log)
throws IOException, InterruptedException {
if (shouldExit.get()) {
return;
}
log.info("Waiting for NameNode to finish starting up...");
waitForNameNodeJMXValue("Startup progress",
NAMENODE_STARTUP_PROGRESS_JMX_QUERY, "PercentComplete", 1.0, 0.01,
false, nameNodeProperties, shouldExit, log);
log.info("NameNode has started!");
} | 3.68 |
hbase_HFileCorruptionChecker_checkMobFile | /**
* Checks a path to see if it is a valid mob file.
* @param p full Path to a mob file.
* @throws IOException This is a connectivity related exception
*/
protected void checkMobFile(Path p) throws IOException {
HFile.Reader r = null;
try {
r = HFile.createReader(fs, p, cacheConf, true, conf);
} catch (CorruptHFileException che) {
LOG.warn("Found corrupt mob file " + p, che);
corruptedMobFiles.add(p);
if (inQuarantineMode) {
Path dest = createQuarantinePath(p);
LOG.warn("Quarantining corrupt mob file " + p + " into " + dest);
boolean success = fs.mkdirs(dest.getParent());
success = success ? fs.rename(p, dest) : false;
if (!success) {
failureMobFiles.add(p);
} else {
quarantinedMobFiles.add(dest);
}
}
return;
} catch (FileNotFoundException fnfe) {
LOG.warn("Mob file " + p + " was missing. Likely removed due to compaction?");
missedMobFiles.add(p);
} finally {
mobFilesChecked.addAndGet(1);
if (r != null) {
r.close(true);
}
}
} | 3.68 |
hudi_HoodieCopyOnWriteTableInputFormat_listStatusForIncrementalMode | /**
* Achieves listStatus functionality for an incrementally queried table. Instead of listing all
* partitions and then filtering based on the commits of interest, this logic first extracts the
* partitions touched by the desired commits and then lists only those partitions.
*/
protected List<FileStatus> listStatusForIncrementalMode(JobConf job,
HoodieTableMetaClient tableMetaClient,
List<Path> inputPaths,
String incrementalTable) throws IOException {
Job jobContext = Job.getInstance(job);
Option<HoodieTimeline> timeline = HoodieInputFormatUtils.getFilteredCommitsTimeline(jobContext, tableMetaClient);
if (!timeline.isPresent()) {
return null;
}
Option<List<HoodieInstant>> commitsToCheck = HoodieInputFormatUtils.getCommitsForIncrementalQuery(jobContext, incrementalTable, timeline.get());
if (!commitsToCheck.isPresent()) {
return null;
}
Option<String> incrementalInputPaths = HoodieInputFormatUtils.getAffectedPartitions(commitsToCheck.get(), tableMetaClient, timeline.get(), inputPaths);
// Mutate the JobConf to set the input paths to only partitions touched by incremental pull.
if (!incrementalInputPaths.isPresent()) {
return null;
}
setInputPaths(job, incrementalInputPaths.get());
FileStatus[] fileStatuses = doListStatus(job);
return HoodieInputFormatUtils.filterIncrementalFileStatus(jobContext, tableMetaClient, timeline.get(), fileStatuses, commitsToCheck.get());
} | 3.68 |
framework_VTree_onBrowserEvent | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.Widget#onBrowserEvent(com.google.gwt
* .user.client.Event)
*/
@Override
public void onBrowserEvent(Event event) {
super.onBrowserEvent(event);
final int type = DOM.eventGetType(event);
final Element target = DOM.eventGetTarget(event);
if (type == Event.ONLOAD && icon != null
&& target == icon.getElement()) {
iconLoaded.trigger();
}
if (disabled) {
return;
}
final boolean inCaption = isCaptionElement(target);
if (inCaption
&& client.hasEventListeners(VTree.this,
TreeConstants.ITEM_CLICK_EVENT_ID)
&& (type == Event.ONDBLCLICK || type == Event.ONMOUSEUP)) {
fireClick(event);
}
if (type == Event.ONCLICK) {
if (getElement() == target) {
// state change
toggleState();
} else if (!readonly && inCaption) {
if (selectable) {
// caption click = selection change && possible click
// event
if (handleClickSelection(
event.getCtrlKey() || event.getMetaKey(),
event.getShiftKey())) {
event.preventDefault();
}
} else {
// Not selectable, only focus the node.
setFocusedNode(this);
}
}
event.stopPropagation();
} else if (type == Event.ONCONTEXTMENU) {
showContextMenu(event);
}
if (dragMode != 0 || dropHandler != null) {
if (type == Event.ONMOUSEDOWN || type == Event.ONTOUCHSTART) {
if (nodeCaptionDiv.isOrHasChild(
(Node) event.getEventTarget().cast())) {
if (dragMode > 0 && (type == Event.ONTOUCHSTART || event
.getButton() == NativeEvent.BUTTON_LEFT)) {
mouseDownEvent = event; // save event for possible
// dd operation
if (type == Event.ONMOUSEDOWN) {
event.preventDefault(); // prevent text
// selection
} else {
/*
* FIXME We prevent touch start event to be used
* as a scroll start event. Note that we cannot
* easily distinguish whether the user wants to
* drag or scroll. The same issue is in table
* that has scrollable area and has drag and
* drop enable. Some kind of timer might be used
* to resolve the issue.
*/
event.stopPropagation();
}
}
}
} else if (type == Event.ONMOUSEMOVE || type == Event.ONMOUSEOUT
|| type == Event.ONTOUCHMOVE) {
if (mouseDownEvent != null) {
// start actual drag on slight move when mouse is down
VTransferable t = new VTransferable();
t.setDragSource(ConnectorMap.get(client)
.getConnector(VTree.this));
t.setData("itemId", key);
VDragEvent drag = VDragAndDropManager.get().startDrag(t,
mouseDownEvent, true);
drag.createDragImage(nodeCaptionDiv, true);
event.stopPropagation();
mouseDownEvent = null;
}
} else if (type == Event.ONMOUSEUP) {
mouseDownEvent = null;
}
if (type == Event.ONMOUSEOVER) {
mouseDownEvent = null;
currentMouseOverKey = key;
event.stopPropagation();
}
} else if (type == Event.ONMOUSEDOWN
&& event.getButton() == NativeEvent.BUTTON_LEFT) {
event.preventDefault(); // text selection
}
} | 3.68 |
framework_VTextField_setMaxLength | /** For internal use only. May be removed or replaced in the future. */
public void setMaxLength(int newMaxLength) {
if (newMaxLength == maxLength) {
return;
}
maxLength = newMaxLength;
updateMaxLength(maxLength);
} | 3.68 |
flink_CsvReader_ignoreInvalidLines | /**
* Sets the CSV reader to ignore any invalid lines. This is useful for files that contain an
* empty line at the end, multiple header lines or comments. This would throw an exception
* otherwise.
*
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader ignoreInvalidLines() {
ignoreInvalidLines = true;
return this;
} | 3.68 |
flink_Tuple25_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
* @param f14 The value for field 14
* @param f15 The value for field 15
* @param f16 The value for field 16
* @param f17 The value for field 17
* @param f18 The value for field 18
* @param f19 The value for field 19
* @param f20 The value for field 20
* @param f21 The value for field 21
* @param f22 The value for field 22
* @param f23 The value for field 23
* @param f24 The value for field 24
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18,
T19 f19,
T20 f20,
T21 f21,
T22 f22,
T23 f23,
T24 f24) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
this.f19 = f19;
this.f20 = f20;
this.f21 = f21;
this.f22 = f22;
this.f23 = f23;
this.f24 = f24;
} | 3.68 |
framework_Embedded_addClickListener | /**
* Add a click listener to the component. The listener is called whenever
* the user clicks inside the component. Depending on the content the event
* may be blocked and in that case no event is fired.
*
* @see Registration
*
* @param listener
* The listener to add
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addClickListener(ClickListener listener) {
return addListener(EventId.CLICK_EVENT_IDENTIFIER, ClickEvent.class,
listener, ClickListener.clickMethod);
} | 3.68 |
hbase_IpcClientSpanBuilder_getRpcName | /**
* Retrieve the {@code $method} value from {@code md}.
*/
public static String getRpcName(final Descriptors.MethodDescriptor md) {
return md.getName();
} | 3.68 |
streampipes_StatementHandler_executePreparedStatement | /**
* Clears, fills and executes the saved prepared statement {@code ps} with the data found in
* event. To fill in the values it calls
* {@link StatementHandler#fillPreparedStatement(DbDescription, TableDescription, Connection, Map, String)}.
*
* @param event Data to be saved in the SQL table
* @throws SQLException When the statement cannot be executed
* @throws SpRuntimeException When the table name is not allowed or it is thrown
* by {@link org.apache.streampipes.sinks.databases.jvm.jdbcclient.utils.StatementUtils
* #setValue(ParameterInformation, Object, PreparedStatement)}
*/
public void executePreparedStatement(DbDescription dbDescription, TableDescription tableDescription,
Connection connection, final Map<String, Object> event)
throws SQLException, SpRuntimeException {
if (this.getPreparedStatement() != null) {
this.preparedStatement.clearParameters();
}
fillPreparedStatement(dbDescription, tableDescription, connection, event, "");
this.preparedStatement.executeUpdate();
} | 3.68 |
hudi_HoodieMetadataTableValidator_validateLatestBaseFiles | /**
* Compare getLatestBaseFiles between metadata table and fileSystem.
*/
private void validateLatestBaseFiles(
HoodieMetadataValidationContext metadataTableBasedContext,
HoodieMetadataValidationContext fsBasedContext,
String partitionPath,
Set<String> baseDataFilesForCleaning) {
List<HoodieBaseFile> latestFilesFromMetadata;
List<HoodieBaseFile> latestFilesFromFS;
if (!baseDataFilesForCleaning.isEmpty()) {
latestFilesFromMetadata = filterBaseFileBasedOnInflightCleaning(metadataTableBasedContext.getSortedLatestBaseFileList(partitionPath), baseDataFilesForCleaning);
latestFilesFromFS = filterBaseFileBasedOnInflightCleaning(fsBasedContext.getSortedLatestBaseFileList(partitionPath), baseDataFilesForCleaning);
} else {
latestFilesFromMetadata = metadataTableBasedContext.getSortedLatestBaseFileList(partitionPath);
latestFilesFromFS = fsBasedContext.getSortedLatestBaseFileList(partitionPath);
}
LOG.debug("Latest base file from metadata: " + latestFilesFromMetadata + ". For partitions " + partitionPath);
LOG.debug("Latest base file from direct listing: " + latestFilesFromFS + ". For partitions " + partitionPath);
validate(latestFilesFromMetadata, latestFilesFromFS, partitionPath, "latest base files");
} | 3.68 |
rocketmq-connect_ProcessingContext_failed | /**
* @return true if an error has been recorded, false otherwise
*/
public boolean failed() {
return error() != null;
} | 3.68 |
flink_CompositeTypeSerializerSnapshot_resolveOuterSchemaCompatibility | /**
* Checks the schema compatibility of the given new serializer based on the outer snapshot.
*
* <p>The base implementation of this method assumes that the outer serializer only has nested
* serializers and no extra information, and therefore the result of the check is {@link
* OuterSchemaCompatibility#COMPATIBLE_AS_IS}. Otherwise, if the outer serializer contains some
* extra information that has been persisted as part of the serializer snapshot, this must be
* overridden. Note that this method and the corresponding methods {@link
* #writeOuterSnapshot(DataOutputView)}, {@link #readOuterSnapshot(int, DataInputView,
* ClassLoader)} needs to be implemented.
*
* @param newSerializer the new serializer, which contains the new outer information to check
* against.
* @return a {@link OuterSchemaCompatibility} indicating whether or the new serializer's outer
* information is compatible, requires migration, or incompatible with the one written in
* this snapshot.
*/
protected OuterSchemaCompatibility resolveOuterSchemaCompatibility(S newSerializer) {
return (isOuterSnapshotCompatible(newSerializer))
? OuterSchemaCompatibility.COMPATIBLE_AS_IS
: OuterSchemaCompatibility.INCOMPATIBLE;
} | 3.68 |
flink_FlinkHintStrategies_createHintStrategyTable | /**
* Customize the {@link HintStrategyTable} which contains hint strategies supported by Flink.
*/
public static HintStrategyTable createHintStrategyTable() {
return HintStrategyTable.builder()
// Configure to always throw when we encounter any hint errors
// (either the non-registered hint or the hint format).
.errorHandler(Litmus.THROW)
.hintStrategy(
FlinkHints.HINT_NAME_OPTIONS,
HintStrategy.builder(HintPredicates.TABLE_SCAN)
.optionChecker(OPTIONS_KV_OPTION_CHECKER)
.build())
.hintStrategy(
FlinkHints.HINT_NAME_JSON_AGGREGATE_WRAPPED,
HintStrategy.builder(HintPredicates.AGGREGATE)
.excludedRules(WrapJsonAggFunctionArgumentsRule.INSTANCE)
.build())
// internal join hint used for alias
.hintStrategy(
FlinkHints.HINT_ALIAS,
// currently, only correlate&join hints care about query block alias
HintStrategy.builder(
HintPredicates.or(
HintPredicates.CORRELATE, HintPredicates.JOIN))
.optionChecker(fixedSizeListOptionChecker(1))
.build())
// TODO semi/anti join with CORRELATE is not supported
.hintStrategy(
JoinStrategy.BROADCAST.getJoinHintName(),
HintStrategy.builder(HintPredicates.JOIN)
.optionChecker(NON_EMPTY_LIST_OPTION_CHECKER)
.build())
.hintStrategy(
JoinStrategy.SHUFFLE_HASH.getJoinHintName(),
HintStrategy.builder(HintPredicates.JOIN)
.optionChecker(NON_EMPTY_LIST_OPTION_CHECKER)
.build())
.hintStrategy(
JoinStrategy.SHUFFLE_MERGE.getJoinHintName(),
HintStrategy.builder(HintPredicates.JOIN)
.optionChecker(NON_EMPTY_LIST_OPTION_CHECKER)
.build())
.hintStrategy(
JoinStrategy.NEST_LOOP.getJoinHintName(),
HintStrategy.builder(HintPredicates.JOIN)
.optionChecker(NON_EMPTY_LIST_OPTION_CHECKER)
.build())
.hintStrategy(
JoinStrategy.LOOKUP.getJoinHintName(),
HintStrategy.builder(
HintPredicates.or(
HintPredicates.CORRELATE, HintPredicates.JOIN))
.optionChecker(LOOKUP_NON_EMPTY_KV_OPTION_CHECKER)
.build())
.build();
} | 3.68 |
hbase_ObserverContextImpl_shouldBypass | /**
* @return {@code true}, if {@link ObserverContext#bypass()} was called by one of the loaded
* coprocessors, {@code false} otherwise.
*/
public boolean shouldBypass() {
if (!isBypassable()) {
return false;
}
if (bypass) {
bypass = false;
return true;
}
return false;
} | 3.68 |
flink_MessageParameter_resolve | /**
* Resolves this parameter for the given value.
*
* @param value value to resolve this parameter with
*/
public final void resolve(X value) {
Preconditions.checkState(!resolved, "This parameter was already resolved.");
this.value = Preconditions.checkNotNull(value);
this.resolved = true;
} | 3.68 |
rocketmq-connect_WorkerSourceTask_convertTransformedRecord | /**
* Convert the source record into a producer record.
*/
protected Message convertTransformedRecord(final String topic, ConnectRecord record) {
if (record == null) {
return null;
}
Message sourceMessage = new Message();
sourceMessage.setTopic(topic);
byte[] key = retryWithToleranceOperator.execute(() -> keyConverter.fromConnectData(topic, record.getKeySchema(), record.getKey()),
ErrorReporter.Stage.CONVERTER, keyConverter.getClass());
byte[] value = retryWithToleranceOperator.execute(() -> valueConverter.fromConnectData(topic, record.getSchema(), record.getData()),
ErrorReporter.Stage.CONVERTER, valueConverter.getClass());
if (value.length > ConnectorConfig.MAX_MESSAGE_SIZE) {
log.error("Send record, message size is greater than {} bytes, record: {}", ConnectorConfig.MAX_MESSAGE_SIZE, JSON.toJSONString(record));
}
if (key != null) {
sourceMessage.setKeys(new String(key));
}
sourceMessage.setBody(value);
if (retryWithToleranceOperator.failed()) {
return null;
}
// put extend msg property
putExtendMsgProperty(record, sourceMessage, topic);
return sourceMessage;
} | 3.68 |
hudi_JavaUpsertPartitioner_getSmallFiles | /**
* Returns a list of small files in the given partition path.
*/
protected List<SmallFile> getSmallFiles(String partitionPath) {
// smallFiles only for partitionPath
List<SmallFile> smallFileLocations = new ArrayList<>();
HoodieTimeline commitTimeline = table.getMetaClient().getCommitsTimeline().filterCompletedInstants();
if (!commitTimeline.empty()) { // if we have some commits
HoodieInstant latestCommitTime = commitTimeline.lastInstant().get();
List<HoodieBaseFile> allFiles = table.getBaseFileOnlyView()
.getLatestBaseFilesBeforeOrOn(partitionPath, latestCommitTime.getTimestamp()).collect(Collectors.toList());
for (HoodieBaseFile file : allFiles) {
if (file.getFileSize() < config.getParquetSmallFileLimit()) {
SmallFile sf = new SmallFile();
sf.location = new HoodieRecordLocation(file.getCommitTime(), file.getFileId());
sf.sizeBytes = file.getFileSize();
smallFileLocations.add(sf);
}
}
}
return smallFileLocations;
} | 3.68 |
hmily_SubCoordinator_doOnePhaseCommit | /**
* Performs a direct one-phase commit (commit without a separate prepare phase).
*/
private void doOnePhaseCommit() throws TransactionRolledbackException {
state = XaState.STATUS_COMMITTING;
HmilyXaResource xaResource = (HmilyXaResource) resources.get(0);
try {
xaResource.commit(true);
state = XaState.STATUS_COMMITTED;
} catch (XAException ex) {
state = XaState.STATUS_UNKNOWN;
logger.error("xa commit error{}:{}", xaResource, HmilyXaException.getMessage(ex));
if (Objects.equals(ex.errorCode, XAException.XA_RBROLLBACK)) {
throw new TransactionRolledbackException("XAException:" + ex.getMessage());
}
throw new RuntimeException("XAException" + ex.getMessage());
} finally {
afterCompletion();
}
} | 3.68 |
flink_SqlFunctionUtils_hex | /** Returns the hex string of a string argument. */
public static String hex(String x) {
return EncodingUtils.hex(x.getBytes(StandardCharsets.UTF_8)).toUpperCase();
} | 3.68 |
hadoop_FederationClientMethod_getTypes | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.68 |
querydsl_ClassPathUtils_scanPackage | /**
* Return the classes from the given package and subpackages using the supplied classloader
*
* @param classLoader classloader to be used
* @param pkg package to scan
* @return set of found classes
* @throws IOException
*/
public static Set<Class<?>> scanPackage(ClassLoader classLoader, String pkg) throws IOException {
return new ClassGraph()
.enableClassInfo()
.acceptPackages(pkg)
.rejectPackages("com.sun", "com.apple")
.overrideClassLoaders(classLoader)
.scan()
.getAllClasses()
.stream().map(info -> safeClassForName(classLoader, info.getName()))
.collect(Collectors.toSet());
} | 3.68 |
hadoop_MsiTokenProvider_isTokenAboutToExpire | /**
* Checks if the token is about to expire as per the base expiry logic.
* Otherwise, treats the token as expiring every hour so that it gets refreshed.
*
* @return true if the token is expiring in the next hour or if a token has
never been fetched
*/
@Override
protected boolean isTokenAboutToExpire() {
if (tokenFetchTime == -1 || super.isTokenAboutToExpire()) {
return true;
}
boolean expiring = false;
long elapsedTimeSinceLastTokenRefreshInMillis =
System.currentTimeMillis() - tokenFetchTime;
expiring = elapsedTimeSinceLastTokenRefreshInMillis >= ONE_HOUR
|| elapsedTimeSinceLastTokenRefreshInMillis < 0;
// In case of, Token is not refreshed for 1 hr or any clock skew issues,
// refresh token.
if (expiring) {
LOG.debug("MSIToken: token renewing. Time elapsed since last token fetch:"
+ " {} milli seconds", elapsedTimeSinceLastTokenRefreshInMillis);
}
return expiring;
} | 3.68 |
hbase_PermissionStorage_userPermissionKey | /**
* Build qualifier key from user permission: "username", "username,family", or "username,family,qualifier"
*/
static byte[] userPermissionKey(UserPermission permission) {
byte[] key = Bytes.toBytes(permission.getUser());
byte[] qualifier = null;
byte[] family = null;
if (permission.getPermission().getAccessScope() == Permission.Scope.TABLE) {
TablePermission tablePermission = (TablePermission) permission.getPermission();
family = tablePermission.getFamily();
qualifier = tablePermission.getQualifier();
}
if (family != null && family.length > 0) {
key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, family));
if (qualifier != null && qualifier.length > 0) {
key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, qualifier));
}
}
return key;
} | 3.68 |
hbase_ServerManager_checkClockSkew | /**
* Checks the clock skew between the server and the master. If the clock skew exceeds the
* configured max, it will throw an exception; if it exceeds the configured warning threshold, it
* will log a warning but start normally.
* @param serverName Incoming servers's name
* @throws ClockOutOfSyncException if the skew exceeds the configured max value
*/
private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
throws ClockOutOfSyncException {
long skew = Math.abs(EnvironmentEdgeManager.currentTime() - serverCurrentTime);
if (skew > maxSkew) {
String message = "Server " + serverName + " has been "
+ "rejected; Reported time is too far out of sync with master. " + "Time difference of "
+ skew + "ms > max allowed of " + maxSkew + "ms";
LOG.warn(message);
throw new ClockOutOfSyncException(message);
} else if (skew > warningSkew) {
String message = "Reported time for server " + serverName + " is out of sync with master "
+ "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " + "error threshold is "
+ maxSkew + "ms)";
LOG.warn(message);
}
} | 3.68 |