name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_PlacementConstraints_targetNotIn | /**
* Creates a constraint that requires allocations to be placed on nodes that
* belong to a scope (e.g., node or rack) that does not satisfy any of the
* target expressions.
*
* @param scope the scope within which the target expressions should not be
* true
* @param targetExpressions the expressions that need to not be true within
* the scope
* @return the resulting placement constraint
*/
public static AbstractConstraint targetNotIn(String scope,
TargetExpression... targetExpressions) {
return new SingleConstraint(scope, 0, 0, targetExpressions);
} | 3.68 |
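A brief usage sketch for the constraint above (not part of the dataset row): it builds a node-scoped anti-affinity constraint through the companion `PlacementTargets`/`build` helpers; the `"hbase-master"` allocation tag is purely illustrative.

```java
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

public class AntiAffinityExample {
    public static void main(String[] args) {
        // Node-scoped anti-affinity: the allocation must land on a node that carries
        // no container tagged "hbase-master" (the tag name is illustrative).
        PlacementConstraint constraint = PlacementConstraints.build(
                PlacementConstraints.targetNotIn(NODE, allocationTag("hbase-master")));
        System.out.println(constraint);
    }
}
```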
graphhopper_PbfBlobResult_getEntities | /**
* Gets the entities decoded from the blob. This is only valid after complete becomes true, and
* if success is true.
* <p>
*
* @return The list of decoded entities.
*/
public List<ReaderElement> getEntities() {
return entities;
} | 3.68 |
querydsl_JTSGeometryExpression_isEmpty | /**
* Returns 1 (TRUE) if this geometric object is the empty Geometry. If true, then this
* geometric object represents the empty point set ∅ for the coordinate space.
*
* @return empty
*/
public BooleanExpression isEmpty() {
if (empty == null) {
empty = Expressions.booleanOperation(SpatialOps.IS_EMPTY, mixin);
}
return empty;
} | 3.68 |
framework_BeanItem_getPropertyDescriptors | /**
* <p>
* Perform introspection on a Java Bean class to find its properties.
* </p>
*
* <p>
* Note : This version only supports introspectable bean properties and
* their getter and setter methods. Stand-alone <code>is</code> and
* <code>are</code> methods are not supported.
* </p>
*
* @param beanClass
* the Java Bean class to get properties for.
* @return an ordered map from property names to property descriptors
*/
static <BT> LinkedHashMap<String, VaadinPropertyDescriptor<BT>> getPropertyDescriptors(
final Class<BT> beanClass) {
final LinkedHashMap<String, VaadinPropertyDescriptor<BT>> pdMap = new LinkedHashMap<String, VaadinPropertyDescriptor<BT>>();
// Try to introspect, if it fails, we just have an empty Item
try {
List<PropertyDescriptor> propertyDescriptors = BeanUtil
.getBeanPropertyDescriptors(beanClass);
// Add all the bean properties as MethodProperties to this Item
// later entries on the list overwrite earlier ones
for (PropertyDescriptor pd : propertyDescriptors) {
final Method getMethod = pd.getReadMethod();
if ((getMethod != null)
&& getMethod.getDeclaringClass() != Object.class) {
VaadinPropertyDescriptor<BT> vaadinPropertyDescriptor = new MethodPropertyDescriptor<BT>(
pd.getName(), pd.getPropertyType(),
pd.getReadMethod(), pd.getWriteMethod());
pdMap.put(pd.getName(), vaadinPropertyDescriptor);
}
}
} catch (final IntrospectionException ignored) {
}
return pdMap;
} | 3.68 |
morf_GraphBasedUpgradeTraversalService_nextNode | /**
* @return the next node to be executed if such a node is available
*/
public Optional<GraphBasedUpgradeNode> nextNode() {
lock.lock();
try {
Optional<GraphBasedUpgradeNode> nextNode = readyToExecuteNodes.stream()
.min(Comparator.comparing(GraphBasedUpgradeNode::getSequence));
if (nextNode.isPresent()) {
readyToExecuteNodes.remove(nextNode.get());
LOG.debug("Returning next node to be processed: " + nextNode.get().getName());
} else {
LOG.debug("No node ready to be processed is available.");
}
return nextNode;
} finally {
lock.unlock();
}
} | 3.68 |
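The Optional-based contract above implies a simple consumer-side polling loop; the sketch below is hypothetical — the completion check and node executor are stand-ins supplied by the caller, not part of the morf API shown here.

```java
import java.util.Optional;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;

/** Hypothetical consumer loop: drains upgrade nodes from the service as they become ready. */
public final class UpgradeNodeDrainer {
    public static void drain(GraphBasedUpgradeTraversalService service,
                             BooleanSupplier allNodesExecuted,          // stand-in completion check
                             Consumer<GraphBasedUpgradeNode> executor)  // stand-in node runner
            throws InterruptedException {
        while (!allNodesExecuted.getAsBoolean()) {
            Optional<GraphBasedUpgradeNode> next = service.nextNode();
            if (next.isPresent()) {
                executor.accept(next.get());   // run the upgrade node
            } else {
                Thread.sleep(50);              // nothing ready yet; back off briefly
            }
        }
    }
}
```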
hbase_MutableFastCounter_incr | /**
* Increment the value by a delta
* @param delta of the increment
*/
public void incr(long delta) {
counter.add(delta);
setChanged();
} | 3.68 |
framework_AtmospherePushConnection_onTransportFailure | /**
* Called if the transport mechanism cannot be used and the fallback will be
* tried.
*/
protected void onTransportFailure() {
getLogger().warning("Push connection using primary method ("
+ getConfig().getTransport() + ") failed. Trying with "
+ getConfig().getFallbackTransport());
} | 3.68 |
framework_SQLContainer_getReferencedItemId | /**
* Fetches the Item Id of the referenced item from the target SQLContainer.
*
* @param itemId
* Item Id of the reference source (from this container)
* @param refdCont
* Target SQLContainer of the reference
* @return Item Id of the referenced item, or null if not found
*/
public Object getReferencedItemId(Object itemId, SQLContainer refdCont) {
if (refdCont == null) {
throw new IllegalArgumentException(
"Referenced SQLContainer can not be null.");
}
Reference r = references.get(refdCont);
if (r == null) {
throw new IllegalArgumentException(
"Reference to the given SQLContainer not defined.");
}
Object refKey = getContainerProperty(itemId, r.getReferencingColumn())
.getValue();
refdCont.removeAllContainerFilters();
refdCont.addContainerFilter(new Equal(r.getReferencedColumn(), refKey));
Object toReturn = refdCont.firstItemId();
refdCont.removeAllContainerFilters();
return toReturn;
} | 3.68 |
hadoop_RouterClientRMService_getPipelines | /**
* Gets the Request interceptor chains for all the users.
*
* @return the request interceptor chains.
*/
@VisibleForTesting
protected Map<String, RequestInterceptorChainWrapper> getPipelines() {
return this.userPipelineMap;
} | 3.68 |
hbase_MetaFixer_createMetaEntries | /**
* Create entries in the {@code hbase:meta} for each provided {@link RegionInfo}. Best effort.
* @param masterServices used to connect to {@code hbase:meta}
* @param newRegionInfos the new {@link RegionInfo} entries to add to the filesystem
* @return a list of {@link RegionInfo} entries for which {@code hbase:meta} entries were
* successfully created
*/
private static List<RegionInfo> createMetaEntries(final MasterServices masterServices,
final List<RegionInfo> newRegionInfos) {
final List<Either<List<RegionInfo>, IOException>> addMetaEntriesResults =
newRegionInfos.stream().map(regionInfo -> {
try {
TableDescriptor td = masterServices.getTableDescriptors().get(regionInfo.getTable());
// Add replicas if needed
// we need to create regions with replicaIds starting from 1
List<RegionInfo> newRegions = RegionReplicaUtil
.addReplicas(Collections.singletonList(regionInfo), 1, td.getRegionReplication());
// Add regions to META
MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), newRegions,
td.getRegionReplication());
return Either.<List<RegionInfo>, IOException> ofLeft(newRegions);
} catch (IOException e) {
return Either.<List<RegionInfo>, IOException> ofRight(e);
}
}).collect(Collectors.toList());
final List<RegionInfo> createMetaEntriesSuccesses =
addMetaEntriesResults.stream().filter(Either::hasLeft).map(Either::getLeft)
.flatMap(List::stream).collect(Collectors.toList());
final List<IOException> createMetaEntriesFailures = addMetaEntriesResults.stream()
.filter(Either::hasRight).map(Either::getRight).collect(Collectors.toList());
LOG.debug("Added {}/{} entries to hbase:meta", createMetaEntriesSuccesses.size(),
newRegionInfos.size());
if (!createMetaEntriesFailures.isEmpty()) {
LOG.warn(
"Failed to create entries in hbase:meta for {}/{} RegionInfo descriptors. First"
+ " failure message included; full list of failures with accompanying stack traces is"
+ " available at log level DEBUG. message={}",
createMetaEntriesFailures.size(), addMetaEntriesResults.size(),
createMetaEntriesFailures.get(0).getMessage());
if (LOG.isDebugEnabled()) {
createMetaEntriesFailures
.forEach(ioe -> LOG.debug("Attempt to fix region hole in hbase:meta failed.", ioe));
}
}
return createMetaEntriesSuccesses;
} | 3.68 |
morf_AuditRecordHelper_createAuditInsertStatement | /**
* Returns an {@link InsertStatement} which adds a record to the upgrade audit table.
*
* @param uuid The UUID of the step which has been applied
* @param description The description of the step.
* @return The insert statement
*/
public static InsertStatement createAuditInsertStatement(UUID uuid, String description) {
InsertStatement auditRecord = new InsertStatement().into(
new TableReference("UpgradeAudit")).values(
new FieldLiteral(uuid.toString()).as("upgradeUUID"),
new FieldLiteral(description).as("description"),
cast(dateToYyyyMMddHHmmss(now())).asType(DataType.DECIMAL, 14).as("appliedTime")
);
return auditRecord;
} | 3.68 |
hbase_ZKUtil_getNumberOfChildren | /**
* Get the number of children of the specified node. If the node does not exist or has no
* children, returns 0. Sets no watches at all.
* @param zkw zk reference
* @param znode path of node to count children of
* @return number of children of specified node, 0 if none or parent does not exist
* @throws KeeperException if unexpected zookeeper exception
*/
public static int getNumberOfChildren(ZKWatcher zkw, String znode) throws KeeperException {
try {
Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null);
return stat == null ? 0 : stat.getNumChildren();
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to get children of node " + znode));
zkw.keeperException(e);
} catch (InterruptedException e) {
zkw.interruptedException(e);
}
return 0;
} | 3.68 |
hadoop_S3ARemoteInputStream_read | /**
* Reads up to {@code len} bytes from this stream and copies them into
* the given {@code buffer} starting at the given {@code offset}.
* Returns the number of bytes actually copied in to the given buffer.
*
* @param buffer the buffer to copy data into.
* @param offset data is copied starting at this offset.
* @param len max number of bytes to copy.
* @return the number of bytes actually copied in to the given buffer.
* @throws IOException if there is an IO error during this operation.
*/
@Override
public int read(byte[] buffer, int offset, int len) throws IOException {
throwIfClosed();
if (len == 0) {
return 0;
}
if (remoteObject.size() == 0
|| nextReadPos >= remoteObject.size()) {
return -1;
}
if (!ensureCurrentBuffer()) {
return -1;
}
int numBytesRead = 0;
int numBytesRemaining = len;
while (numBytesRemaining > 0) {
if (!ensureCurrentBuffer()) {
break;
}
ByteBuffer buf = fpos.buffer();
int bytesToRead = Math.min(numBytesRemaining, buf.remaining());
buf.get(buffer, offset, bytesToRead);
nextReadPos += bytesToRead;
incrementBytesRead(bytesToRead);
offset += bytesToRead;
numBytesRemaining -= bytesToRead;
numBytesRead += bytesToRead;
}
return numBytesRead;
} | 3.68 |
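Because the method follows the usual InputStream contract (partial reads, -1 at end of object), a caller that needs an exact number of bytes still loops; a minimal, library-agnostic sketch:

```java
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public final class ReadFully {
    /** Reads exactly len bytes from in, looping over partial reads; fails if the stream ends early. */
    public static void readFully(InputStream in, byte[] buffer, int offset, int len) throws IOException {
        while (len > 0) {
            int n = in.read(buffer, offset, len);   // may copy fewer bytes than requested
            if (n < 0) {
                throw new EOFException("Stream ended with " + len + " bytes still expected");
            }
            offset += n;
            len -= n;
        }
    }
}
```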
hadoop_EmptyS3AStatisticsContext_getIOStatistics | /**
* Return an IO statistics instance.
* @return an empty IO statistics instance.
*/
@Override
public IOStatistics getIOStatistics() {
return emptyStatistics();
} | 3.68 |
hbase_MultiByteBuff_get | /**
* Copies the content from this MBB to a ByteBuffer
* @param out the ByteBuffer to which the copy has to happen, its position will be
* advanced.
* @param sourceOffset the offset in the MBB from which the elements have to be copied
* @param length the number of bytes to copy from the MBB
*/
@Override
public void get(ByteBuffer out, int sourceOffset, int length) {
checkRefCount();
int itemIndex = getItemIndex(sourceOffset);
ByteBuffer in = this.items[itemIndex];
sourceOffset = sourceOffset - this.itemBeginPos[itemIndex];
while (length > 0) {
int toRead = Math.min(in.limit() - sourceOffset, length);
ByteBufferUtils.copyFromBufferToBuffer(in, out, sourceOffset, toRead);
length -= toRead;
if (length == 0) {
break;
}
itemIndex++;
in = this.items[itemIndex];
sourceOffset = 0;
}
} | 3.68 |
hadoop_StageConfig_withSuccessMarkerFileLimit | /**
* Number of marker files to include in success file.
* @param value new value
* @return the builder
*/
public StageConfig withSuccessMarkerFileLimit(final int value) {
checkOpen();
successMarkerFileLimit = value;
return this;
} | 3.68 |
flink_OperatorStateCheckpointOutputStream_closeAndGetHandle | /** This method should not be public so as to not expose internals to user code. */
@Override
OperatorStateHandle closeAndGetHandle() throws IOException {
StreamStateHandle streamStateHandle = super.closeAndGetHandleAfterLeasesReleased();
if (null == streamStateHandle) {
return null;
}
if (partitionOffsets.isEmpty() && delegate.getPos() > initialPosition) {
startNewPartition();
}
Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap =
CollectionUtil.newHashMapWithExpectedSize(1);
OperatorStateHandle.StateMetaInfo metaInfo =
new OperatorStateHandle.StateMetaInfo(
partitionOffsets.toArray(), OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);
offsetsMap.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, metaInfo);
return new OperatorStreamStateHandle(offsetsMap, streamStateHandle);
} | 3.68 |
hbase_WALSplitter_getReader | /**
* Create a new {@link WALStreamReader} for reading logs to split.
* @return new Reader instance, caller should close
*/
private WALStreamReader getReader(Path curLogFile, CancelableProgressable reporter)
throws IOException {
return walFactory.createStreamReader(walFS, curLogFile, reporter);
} | 3.68 |
framework_VComboBox_isWaitingForFilteringResponse | /**
* For internal use only - this method will be removed in the future.
*
* @return true if the combo box is waiting for a reply from the server
* with a new page of data, false otherwise
*/
public boolean isWaitingForFilteringResponse() {
return waitingForFilteringResponse;
} | 3.68 |
pulsar_ResourceGroupService_resourceGroupUpdate | /**
* Updates the resource group with the given configuration.
*
* @throws PulsarAdminException if a resource group with that name does not exist.
*/
public void resourceGroupUpdate(String rgName, org.apache.pulsar.common.policies.data.ResourceGroup rgConfig)
throws PulsarAdminException {
if (rgConfig == null) {
throw new IllegalArgumentException("ResourceGroupUpdate: Invalid null ResourceGroup config");
}
ResourceGroup rg = this.getResourceGroupInternal(rgName);
if (rg == null) {
throw new PulsarAdminException("Resource group does not exist: " + rgName);
}
rg.updateResourceGroup(rgConfig);
rgUpdates.labels(rgName).inc();
} | 3.68 |
flink_ReduceOperator_setCombineHint | /**
* Sets the strategy to use for the combine phase of the reduce.
*
* <p>If this method is not called, then the default hint will be used. ({@link
* org.apache.flink.api.common.operators.base.ReduceOperatorBase.CombineHint#OPTIMIZER_CHOOSES})
*
* @param strategy The hint to use.
* @return The ReduceOperator object, for function call chaining.
*/
@PublicEvolving
public ReduceOperator<IN> setCombineHint(CombineHint strategy) {
this.hint = strategy;
return this;
} | 3.68 |
framework_FocusableGrid_addFocusHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasFocusHandlers#addFocusHandler(com.
* google.gwt.event.dom.client.FocusHandler)
*/
@Override
public HandlerRegistration addFocusHandler(FocusHandler handler) {
return addDomHandler(handler, FocusEvent.getType());
} | 3.68 |
hadoop_ServletUtil_getRawPath | /**
* Parse the path component from the given request and return w/o decoding.
* @param request Http request to parse
* @param servletName the name of servlet that precedes the path
* @return path component, null if the default charset is not supported
*/
public static String getRawPath(final HttpServletRequest request, String servletName) {
Preconditions.checkArgument(request.getRequestURI().startsWith(servletName+"/"));
return request.getRequestURI().substring(servletName.length());
} | 3.68 |
querydsl_SQLSerializer_getIdentifierColumns | /**
* Return a list of expressions that can be used to uniquely define the query sources
*
* @param joins the join expressions of the query
* @param alias whether to alias the identifier columns
* @return identifier columns
*/
@SuppressWarnings("unchecked")
protected List<Expression<?>> getIdentifierColumns(List<JoinExpression> joins, boolean alias) {
if (joins.size() == 1) {
JoinExpression join = joins.get(0);
if (join.getTarget() instanceof RelationalPath) {
return ((RelationalPath) join.getTarget()).getColumns();
} else {
return Collections.emptyList();
}
} else {
List<Expression<?>> rv = new ArrayList<>();
int counter = 0;
for (JoinExpression join : joins) {
if (join.getTarget() instanceof RelationalPath) {
RelationalPath path = (RelationalPath) join.getTarget();
List<Expression<?>> columns;
if (path.getPrimaryKey() != null) {
columns = path.getPrimaryKey().getLocalColumns();
} else {
columns = path.getColumns();
}
if (alias) {
for (Expression<?> column : columns) {
rv.add(ExpressionUtils.as(column, "col" + (++counter)));
}
} else {
rv.addAll(columns);
}
} else {
// not able to provide a distinct list of columns
return Collections.emptyList();
}
}
return rv;
}
} | 3.68 |
flink_CloseableRegistry_doClose | /**
* This implementation doesn't imply any exception during closing due to backward compatibility.
*/
@Override
public void doClose(List<Closeable> toClose) throws IOException {
IOUtils.closeAllQuietly(reverse(toClose));
} | 3.68 |
hbase_QuotaObserverChore_hasNamespaceQuota | /**
* Returns true if the table exists in a namespace with a namespace quota.
*/
public boolean hasNamespaceQuota(TableName tn) {
return tablesWithNamespaceQuotas.contains(tn);
} | 3.68 |
hbase_RestoreSnapshotHelper_getTableRegionFamilyFiles | /** Returns the set of files in the specified family directory. */
private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
FileStatus[] hfiles = CommonFSUtils.listStatus(fs, familyDir);
if (hfiles == null) {
return Collections.emptySet();
}
Set<String> familyFiles = new HashSet<>(hfiles.length);
for (int i = 0; i < hfiles.length; ++i) {
String hfileName = hfiles[i].getPath().getName();
familyFiles.add(hfileName);
}
return familyFiles;
} | 3.68 |
hadoop_IFileWrappedMapOutput_getMerger | /**
* @return the merger
*/
protected MergeManagerImpl<K, V> getMerger() {
return merger;
} | 3.68 |
framework_Page_addPopStateListener | /**
* Adds a listener that gets notified every time the URI of this page is
* changed due to back/forward functionality of the browser.
* <p>
* Note that one only gets notified when the back/forward button affects
* history changes within the same UI, created by
* {@link Page#pushState(String)} or {@link Page#replaceState(String)}
* functions.
*
* @see #getLocation()
* @see Registration
*
* @param listener
* the Popstate listener to add
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addPopStateListener(Page.PopStateListener listener) {
return addListener(PopStateEvent.class, listener, URI_CHANGED_METHOD);
} | 3.68 |
hadoop_CommitUtilsWithMR_getBaseMagicTaskAttemptPath | /**
* Get the base Magic attempt path, without any annotations to mark relative
* references.
* If there is an app attempt property in the context configuration, that
* is included.
* @param context task context.
* @param jobUUID unique Job ID.
* @param dest The output path to commit work into
* @return the path under which all attempts go
*/
public static Path getBaseMagicTaskAttemptPath(TaskAttemptContext context,
String jobUUID,
Path dest) {
return new Path(
getMagicTaskAttemptsPath(jobUUID, dest, getAppAttemptId(context)),
String.valueOf(context.getTaskAttemptID()));
} | 3.68 |
hudi_PostgresDebeziumAvroPayload_containsBytesToastedValues | /**
* Returns true if the column is of type bytes, or a union containing bytes, and holds a Debezium toasted value.
*
* @param incomingRecord The incoming avro record
* @param field the column of interest
* @return true if the column holds the Debezium toasted-value placeholder, false otherwise
*/
private boolean containsBytesToastedValues(IndexedRecord incomingRecord, Schema.Field field) {
return ((field.schema().getType() == Schema.Type.BYTES
|| (field.schema().getType() == Schema.Type.UNION && field.schema().getTypes().stream().anyMatch(s -> s.getType() == Schema.Type.BYTES)))
// Check length first as an optimization
&& ((ByteBuffer) ((GenericData.Record) incomingRecord).get(field.name())).array().length == DEBEZIUM_TOASTED_VALUE.length()
&& DEBEZIUM_TOASTED_VALUE.equals(new String(((ByteBuffer) ((GenericData.Record) incomingRecord).get(field.name())).array(), StandardCharsets.UTF_8)));
} | 3.68 |
framework_AbstractClientConnector_isThis | /**
* For internal use only, may be changed or removed in future versions.
* <p>
* This method must be protected, because otherwise it will not be redefined
* by the proxy to actually be called on the underlying instance.
* <p>
* See #14639
*
* @deprecated only defined for framework hacks, do not use.
*/
@Deprecated
protected boolean isThis(Object that) {
return this == that;
} | 3.68 |
hudi_KeyRangeLookupTree_insert | /**
* Inserts a new {@link KeyRangeNode} into this look-up tree.
*
* If no root exists, {@code newNode} becomes the root and is returned.
*
* If the current root and {@code newNode} match on both the min and max record key, the two nodes are
* merged: the files from {@code newNode} are added to the current root, and the current root is returned.
*
* If the current root is less than {@code newNode}: if the current root has no right subtree, set the
* right subtree's min and max from {@code newNode} and make {@code newNode} the right subtree; otherwise
* update the right subtree's min and max with {@code newNode}'s min and max record keys as applicable and
* recursively call insert() with the root's right subtree as the new root.
*
* Otherwise (the current root is greater than or equal to {@code newNode}): if the current root has no
* left subtree, set the left subtree's min and max from {@code newNode} and make {@code newNode} the left
* subtree; otherwise update the left subtree's min and max with {@code newNode}'s min and max record keys
* as applicable and recursively call insert() with the root's left subtree as the new root.
*
* @param root refers to the current root of the look-up tree
* @param newNode the new {@link KeyRangeNode} to be inserted
*/
private KeyRangeNode insert(KeyRangeNode root, KeyRangeNode newNode) {
if (root == null) {
root = newNode;
return root;
}
if (root.compareTo(newNode) == 0) {
root.addFiles(newNode.getFileNameList());
return root;
}
if (root.compareTo(newNode) < 0) {
if (root.getRight() == null) {
root.setRightSubTreeMax(newNode.getMaxRecordKey());
root.setRightSubTreeMin(newNode.getMinRecordKey());
root.setRight(newNode);
} else {
root.setRightSubTreeMax(max(root.getRightSubTreeMax(), newNode.getMaxRecordKey()));
root.setRightSubTreeMin(min(root.getRightSubTreeMin(), newNode.getMinRecordKey()));
insert(root.getRight(), newNode);
}
} else {
if (root.getLeft() == null) {
root.setLeftSubTreeMax(newNode.getMaxRecordKey());
root.setLeftSubTreeMin(newNode.getMinRecordKey());
root.setLeft(newNode);
} else {
root.setLeftSubTreeMax(max(root.getLeftSubTreeMax(), newNode.getMaxRecordKey()));
root.setLeftSubTreeMin(min(root.getLeftSubTreeMin(), newNode.getMinRecordKey()));
insert(root.getLeft(), newNode);
}
}
return root;
} | 3.68 |
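The description above amounts to a BST keyed on record-key ranges that also maintains min/max bounds for each subtree. The standalone sketch below illustrates that bookkeeping with plain strings; it is a simplified stand-in, not the Hudi class (file-list merging is reduced to a comment).

```java
/** Simplified stand-in for a key-range lookup node: a BST keyed on [min,max] ranges
 *  that also tracks the min/max record keys of each subtree, mirroring the insert logic above. */
final class RangeNode implements Comparable<RangeNode> {
    String min, max;               // record-key range covered by this node's files
    String leftMin, leftMax;       // bounds over the whole left subtree
    String rightMin, rightMax;     // bounds over the whole right subtree
    RangeNode left, right;

    RangeNode(String min, String max) { this.min = min; this.max = max; }

    @Override public int compareTo(RangeNode o) {
        int c = min.compareTo(o.min);
        return c != 0 ? c : max.compareTo(o.max);
    }

    static RangeNode insert(RangeNode root, RangeNode node) {
        if (root == null) return node;
        if (root.compareTo(node) == 0) return root;       // same range: the real class merges file lists here
        if (root.compareTo(node) < 0) {
            root.rightMin = min(root.rightMin, node.min); // widen right-subtree bounds
            root.rightMax = max(root.rightMax, node.max);
            root.right = insert(root.right, node);
        } else {
            root.leftMin = min(root.leftMin, node.min);   // widen left-subtree bounds
            root.leftMax = max(root.leftMax, node.max);
            root.left = insert(root.left, node);
        }
        return root;
    }

    private static String min(String a, String b) { return a == null || b.compareTo(a) < 0 ? b : a; }
    private static String max(String a, String b) { return a == null || b.compareTo(a) > 0 ? b : a; }
}
```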
morf_GraphBasedUpgradeTraversalService_create | /**
* Creates new {@link GraphBasedUpgradeTraversalService} for a given
* {@link GraphBasedUpgrade}.
*
* @param graphBasedUpgrade for which the service should be created
* @return new {@link GraphBasedUpgradeTraversalService} instance
*/
public GraphBasedUpgradeTraversalService create(GraphBasedUpgrade graphBasedUpgrade) {
return new GraphBasedUpgradeTraversalService(graphBasedUpgrade);
} | 3.68 |
framework_UIDL_getChildrenAsXML | /**
* @deprecated
*/
@Deprecated
public String getChildrenAsXML() {
return toString();
} | 3.68 |
hudi_HoodieBaseFileGroupRecordBuffer_doProcessNextDeletedRecord | /**
* Merge a delete record with another record (data, or delete).
*
* @param deleteRecord the incoming delete record
* @param existingRecordMetadataPair the existing record and its metadata, or null if there is none
* @return the delete record to apply, or empty if the existing record should be kept
*/
protected Option<DeleteRecord> doProcessNextDeletedRecord(DeleteRecord deleteRecord,
Pair<Option<T>, Map<String, Object>> existingRecordMetadataPair) {
if (existingRecordMetadataPair != null) {
// Merge and store the merged record. The ordering val is taken to decide whether the same key record
// should be deleted or be kept. The old record is kept only if the DELETE record has smaller ordering val.
// For same ordering values, uses the natural order(arrival time semantics).
Comparable existingOrderingVal = readerContext.getOrderingValue(
existingRecordMetadataPair.getLeft(), existingRecordMetadataPair.getRight(), readerSchema,
payloadProps);
Comparable deleteOrderingVal = deleteRecord.getOrderingValue();
// Checks the ordering value does not equal to 0
// because we use 0 as the default value which means natural order
boolean chooseExisting = !deleteOrderingVal.equals(0)
&& ReflectionUtils.isSameClass(existingOrderingVal, deleteOrderingVal)
&& existingOrderingVal.compareTo(deleteOrderingVal) > 0;
if (chooseExisting) {
// The DELETE message is obsolete if the old message has greater orderingVal.
return Option.empty();
}
}
// Do delete.
return Option.of(deleteRecord);
} | 3.68 |
morf_SqlDialect_makeStringLiteral | /**
* Turn a string value into an SQL string literal which has that value.
* <p>
* We use {@linkplain StringUtils#isEmpty(CharSequence)} because we want to
* differentiate between a single space and an empty string.
* </p>
* <p>
* This is necessary because char types cannot be null and must contain a
* single space.
* </p>
*
* @param literalValue the literal value of the string.
* @return SQL String Literal
*/
protected String makeStringLiteral(String literalValue) {
if (StringUtils.isEmpty(literalValue)) {
return "NULL";
}
return String.format("'%s'", escapeSql(literalValue));
} | 3.68 |
graphhopper_CHStorage_create | /**
* Creates a new storage. Alternatively we could load an existing one using {@link #loadExisting()}}.
* The number of nodes must be given here while the expected number of shortcuts can
* be given to prevent some memory allocations, but is not a requirement. When in doubt rather use a small value
* so the resulting files/byte arrays won't be unnecessarily large.
* todo: we could also trim down the shortcuts DataAccess when we are done adding shortcuts
*/
public void create(int nodes, int expectedShortcuts) {
if (nodeCount >= 0)
throw new IllegalStateException("CHStorage can only be created once");
if (nodes < 0)
throw new IllegalStateException("CHStorage must be created with a positive number of nodes");
nodesCH.create((long) nodes * nodeCHEntryBytes);
nodeCount = nodes;
for (int node = 0; node < nodes; node++)
setLastShortcut(toNodePointer(node), -1);
shortcuts.create((long) expectedShortcuts * shortcutEntryBytes);
} | 3.68 |
cron-utils_CronConstraintsFactory_ensureEitherDayOfYearOrMonth | /**
* Creates CronConstraint to ensure that either day-of-year or month is assigned a specific value.
*
* @return newly created CronConstraint instance, never {@code null}.
*/
public static CronConstraint ensureEitherDayOfYearOrMonth() {
return new CronConstraint("Both, a day-of-year AND a day-of-month or day-of-week, are not supported.") {
private static final long serialVersionUID = 520379111876897579L;
@Override
public boolean validate(Cron cron) {
CronField dayOfYearField = cron.retrieve(CronFieldName.DAY_OF_YEAR);
if (dayOfYearField != null && !(dayOfYearField.getExpression() instanceof QuestionMark)) {
return cron.retrieve(CronFieldName.DAY_OF_WEEK).getExpression() instanceof QuestionMark
&& cron.retrieve(CronFieldName.DAY_OF_MONTH).getExpression() instanceof QuestionMark;
}
return true;
}
};
} | 3.68 |
framework_Table_getColumnWidth | /**
* Gets the pixel width of column.
*
* @param propertyId the property id of the column
* @return width of column or -1 when value not set
*/
public int getColumnWidth(Object propertyId) {
if (propertyId == null) {
// Since propertyId is null, this is the row header. Use the magic
// id to retrieve the width of the row header.
propertyId = ROW_HEADER_FAKE_PROPERTY_ID;
}
final Integer width = columnWidths.get(propertyId);
if (width == null) {
return -1;
}
return width.intValue();
} | 3.68 |
framework_VTabsheet_isEnabledOnServer | /**
* Returns whether the tab is enabled on server (there is no client-side
* disabling, but the naming convention matches
* {@link #isHiddenOnServer()}).
*
* @return {@code true} if enabled on server, {@code false} otherwise
*/
public boolean isEnabledOnServer() {
return enabledOnServer;
} | 3.68 |
hmily_MetricsReporter_gaugeDecrement | /**
* Gauge decrement.
*
* @param name name
*/
public static void gaugeDecrement(final String name) {
gaugeDecrement(name, null);
} | 3.68 |
framework_VAbstractTextualDate_setFormatString | /**
* Sets the date format string to use for the text field.
*
* @param formatString
* the format string to use, or {@code null} to force re-creating
* the format string from the locale the next time it is needed
* @since 8.1
*/
public void setFormatString(String formatString) {
this.formatStr = formatString;
} | 3.68 |
hmily_HmilySQLComputeUtils_getAllColumns | /**
* Get all columns.
*
* @param segment hmily simple table segment
* @param tableName table name
* @return all table columns in asterisk way
*/
public static String getAllColumns(final HmilySimpleTableSegment segment, final String tableName) {
String result;
if (segment.getAlias().isPresent()) {
result = String.format("%s.*", segment.getAlias().get());
} else if (segment.getOwner().isPresent()) {
result = String.format("%s.%s.*", segment.getOwner(), tableName);
} else {
result = String.format("%s.*", tableName);
}
return result;
} | 3.68 |
hbase_ColumnPaginationFilter_parseFrom | /**
* Parse a serialized representation of {@link ColumnPaginationFilter}
* @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance
* @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static ColumnPaginationFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.ColumnPaginationFilter proto;
try {
proto = FilterProtos.ColumnPaginationFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
if (proto.hasColumnOffset()) {
return new ColumnPaginationFilter(proto.getLimit(), proto.getColumnOffset().toByteArray());
}
return new ColumnPaginationFilter(proto.getLimit(), proto.getOffset());
} | 3.68 |
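A round-trip sketch for the parser above, assuming the standard HBase Filter entry points (the filter's toByteArray on one side, parseFrom on the other); the limit and offset values are arbitrary.

```java
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;

public class FilterRoundTrip {
    public static void main(String[] args) throws Exception {
        // limit = 10 columns per row, starting at column offset 2
        ColumnPaginationFilter original = new ColumnPaginationFilter(10, 2);
        byte[] serialized = original.toByteArray();                        // protobuf-backed bytes
        ColumnPaginationFilter restored = ColumnPaginationFilter.parseFrom(serialized);
        System.out.println(restored.getLimit() + " / " + restored.getOffset());
    }
}
```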
framework_GridMultiSelect_addSelectionListener | /**
* Adds a generic listener to this selection model, accepting both single
* and multiselection events.
*
* @param listener
* the listener to add
* @return a registration handle for removing the listener
*/
public Registration addSelectionListener(SelectionListener<T> listener) {
return model.addSelectionListener(listener);
} | 3.68 |
framework_VGridLayout_createNewCell | /**
* Creates a new Cell with the given coordinates.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param row the row index of the cell
* @param col the column index of the cell
* @return the newly created cell
*/
public Cell createNewCell(int row, int col) {
Cell cell = new Cell(row, col);
cells[col][row] = cell;
return cell;
} | 3.68 |
flink_SkipListUtils_getNextIndexNode | /**
* Returns next key pointer on the given index level.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
* @param level level of index.
*/
public static long getNextIndexNode(MemorySegment memorySegment, int offset, int level) {
return memorySegment.getLong(offset + INDEX_NEXT_OFFSET_BY_LEVEL_ARRAY[level]);
} | 3.68 |
hadoop_AbstractS3ACommitter_deleteTaskAttemptPathQuietly | /**
* Delete the task attempt path without raising any errors.
* @param context task context
*/
protected void deleteTaskAttemptPathQuietly(TaskAttemptContext context) {
Path attemptPath = getBaseTaskAttemptPath(context);
ignoreIOExceptions(LOG, "Delete task attempt path", attemptPath.toString(),
() -> deleteQuietly(
getTaskAttemptFilesystem(context), attemptPath, true));
} | 3.68 |
flink_ExecutionEnvironment_createProgramPlan | /**
* Creates the program's {@link Plan}. The plan is a description of all data sources, data
* sinks, and operations and how they interact, as an isolated unit that can be executed with an
* {@link PipelineExecutor}. Obtaining a plan and starting it with an executor is an alternative
* way to run a program and is only possible if the program consists only of distributed
* operations.
*
* @param jobName The name attached to the plan (displayed in logs and monitoring).
* @param clearSinks Whether or not to start a new stage of execution.
* @return The program's plan.
*/
@Internal
public Plan createProgramPlan(String jobName, boolean clearSinks) {
checkNotNull(jobName);
if (this.sinks.isEmpty()) {
if (wasExecuted) {
throw new RuntimeException(
"No new data sinks have been defined since the "
+ "last execution. The last execution refers to the latest call to "
+ "'execute()', 'count()', 'collect()', or 'print()'.");
} else {
throw new RuntimeException(
"No data sinks have been created yet. "
+ "A program needs at least one sink that consumes data. "
+ "Examples are writing the data set or printing it.");
}
}
final PlanGenerator generator =
new PlanGenerator(sinks, config, getParallelism(), cacheFile, jobName);
final Plan plan = generator.generate();
// clear all the sinks such that the next execution does not redo everything
if (clearSinks) {
this.sinks.clear();
wasExecuted = true;
}
return plan;
} | 3.68 |
flink_FsStateBackend_configure | /**
* Creates a copy of this state backend that uses the values defined in the configuration for
* fields that were not specified in this state backend.
*
* @param config the configuration
* @return The re-configured variant of the state backend
*/
@Override
public FsStateBackend configure(ReadableConfig config, ClassLoader classLoader) {
return new FsStateBackend(this, config, classLoader);
} | 3.68 |
hbase_ByteBuffAllocator_getFreeBufferCount | /**
* The {@link ConcurrentLinkedQueue#size()} is O(N) complexity and time-consuming, so DO NOT use
* the method except in UT.
*/
public int getFreeBufferCount() {
return this.buffers.size();
} | 3.68 |
framework_ContainerEventProvider_getEvent | /**
* Converts an item in the container to a {@link CalendarEvent}.
*
* @param index
* The index of the item in the container to get the event for
* @return the calendar event for the given container index
*/
private CalendarEvent getEvent(int index) {
// Check the event cache first
for (CalendarEvent e : eventCache) {
if (e instanceof ContainerCalendarEvent
&& ((ContainerCalendarEvent) e)
.getContainerIndex() == index) {
return e;
} else if (container.getIdByIndex(index) == e) {
return e;
}
}
final Object id = container.getIdByIndex(index);
Item item = container.getItem(id);
CalendarEvent event;
if (id instanceof CalendarEvent) {
/*
* If we are using the BeanItemContainer or another container which
* stores the objects as ids then just return the instances
*/
event = (CalendarEvent) id;
} else {
/*
* Else we use the properties to create the event
*/
BasicEvent basicEvent = new ContainerCalendarEvent(index);
// Set values from property values
if (captionProperty != null
&& item.getItemPropertyIds().contains(captionProperty)) {
basicEvent.setCaption(String.valueOf(
item.getItemProperty(captionProperty).getValue()));
}
if (descriptionProperty != null && item.getItemPropertyIds()
.contains(descriptionProperty)) {
basicEvent.setDescription(String.valueOf(
item.getItemProperty(descriptionProperty).getValue()));
}
if (startDateProperty != null
&& item.getItemPropertyIds().contains(startDateProperty)) {
basicEvent.setStart((Date) item
.getItemProperty(startDateProperty).getValue());
}
if (endDateProperty != null
&& item.getItemPropertyIds().contains(endDateProperty)) {
basicEvent.setEnd((Date) item.getItemProperty(endDateProperty)
.getValue());
}
if (styleNameProperty != null
&& item.getItemPropertyIds().contains(styleNameProperty)) {
basicEvent.setStyleName(String.valueOf(
item.getItemProperty(styleNameProperty).getValue()));
}
if (allDayProperty != null
&& item.getItemPropertyIds().contains(allDayProperty)) {
basicEvent.setAllDay((Boolean) item
.getItemProperty(allDayProperty).getValue());
}
event = basicEvent;
}
return event;
} | 3.68 |
framework_Form_validate | /**
* Checks the validity of the Form and all of its fields.
*
* @see Validatable#validate()
*/
@Override
public void validate() throws InvalidValueException {
super.validate();
for (final Object id : propertyIds) {
fields.get(id).validate();
}
} | 3.68 |
flink_SegmentsUtil_getBytes | /** Maybe not copied, if want copy, please use copyTo. */
public static byte[] getBytes(MemorySegment[] segments, int baseOffset, int sizeInBytes) {
// avoid copy if `base` is `byte[]`
if (segments.length == 1) {
byte[] heapMemory = segments[0].getHeapMemory();
if (baseOffset == 0 && heapMemory != null && heapMemory.length == sizeInBytes) {
return heapMemory;
} else {
byte[] bytes = new byte[sizeInBytes];
segments[0].get(baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
} else {
byte[] bytes = new byte[sizeInBytes];
copyMultiSegmentsToBytes(segments, baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
} | 3.68 |
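The "maybe not copied" caveat above means the fast path can hand back the segment's own backing array, so callers should treat the result as read-only. A sketch of that aliasing, assuming a single heap segment built via MemorySegmentFactory.wrap (the SegmentsUtil import path varies by Flink version):

```java
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
// NOTE: the package of SegmentsUtil differs across Flink versions; import it from
// wherever the class shown above lives in your tree.

public class GetBytesAliasing {
    public static void main(String[] args) {
        byte[] backing = {1, 2, 3, 4};
        MemorySegment[] segments = {MemorySegmentFactory.wrap(backing)};
        // Fast path: one heap segment, offset 0, exact length -> the backing array itself comes back.
        byte[] result = SegmentsUtil.getBytes(segments, 0, backing.length);
        System.out.println(result == backing); // true: no copy was made, so treat the result as read-only
    }
}
```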
graphhopper_RoutingExample_customizableRouting | /**
* To customize profiles in the config.yml file you can use a json or yml file or embed it directly. See this list:
* web/src/test/resources/com/graphhopper/application/resources and https://www.graphhopper.com/?s=customizable+routing
*/
public static void customizableRouting(String ghLoc) {
GraphHopper hopper = new GraphHopper();
hopper.setOSMFile(ghLoc);
hopper.setGraphHopperLocation("target/routing-custom-graph-cache");
CustomModel serverSideCustomModel = new CustomModel();
hopper.setProfiles(new Profile("car_custom").setCustomModel(serverSideCustomModel).setVehicle("car"));
// The hybrid mode uses the "landmark algorithm" and is up to 15x faster than the flexible mode (Dijkstra).
// Still it is slower than the speed mode ("contraction hierarchies algorithm") ...
hopper.getLMPreparationHandler().setLMProfiles(new LMProfile("car_custom"));
hopper.importOrLoad();
// ... but for the hybrid mode we can customize the route calculation even at request time:
// 1. a request with default preferences
GHRequest req = new GHRequest().setProfile("car_custom").
addPoint(new GHPoint(42.506472, 1.522475)).addPoint(new GHPoint(42.513108, 1.536005));
GHResponse res = hopper.route(req);
if (res.hasErrors())
throw new RuntimeException(res.getErrors().toString());
assert Math.round(res.getBest().getTime() / 1000d) == 94;
// 2. now avoid primary roads and reduce maximum speed, see docs/core/custom-models.md for an in-depth explanation
// and also the blog posts https://www.graphhopper.com/?s=customizable+routing
CustomModel model = new CustomModel();
model.addToPriority(If("road_class == PRIMARY", MULTIPLY, "0.5"));
// unconditional limit to 100km/h
model.addToPriority(If("true", LIMIT, "100"));
req.setCustomModel(model);
res = hopper.route(req);
if (res.hasErrors())
throw new RuntimeException(res.getErrors().toString());
assert Math.round(res.getBest().getTime() / 1000d) == 164;
} | 3.68 |
hbase_QuotaObserverChore_getNamespaceQuotaTables | /**
* Returns an unmodifiable view of all tables in namespaces that have namespace quotas.
*/
public Set<TableName> getNamespaceQuotaTables() {
return Collections.unmodifiableSet(tablesWithNamespaceQuotas);
} | 3.68 |
flink_AbstractStreamingWriter_commitUpToCheckpoint | /** Commit up to this checkpoint id. */
protected void commitUpToCheckpoint(long checkpointId) throws Exception {
helper.commitUpToCheckpoint(checkpointId);
} | 3.68 |
AreaShop_WorldEditSelection_getMaximumLocation | /**
* Get the maximum Location of the selection.
* @return Location with the highest x, y and z
*/
public Location getMaximumLocation() {
return maximum;
} | 3.68 |
hbase_HBaseTestingUtility_validate | /**
* Validate that all the rows between startRow and stopRow are seen exactly once, and all other
* rows none
*/
public void validate() {
for (byte b1 = 'a'; b1 <= 'z'; b1++) {
for (byte b2 = 'a'; b2 <= 'z'; b2++) {
for (byte b3 = 'a'; b3 <= 'z'; b3++) {
int count = seenRows[i(b1)][i(b2)][i(b3)];
int expectedCount = 0;
if (
Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
&& Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0
) {
expectedCount = 1;
}
if (count != expectedCount) {
String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
throw new RuntimeException("Row:" + row + " has a seen count of " + count + " "
+ "instead of " + expectedCount);
}
}
}
}
} | 3.68 |
flink_StateWithExecutionGraph_updateTaskExecutionState | /**
* Updates the execution graph with the given task execution state transition.
*
* @param taskExecutionStateTransition taskExecutionStateTransition to update the ExecutionGraph
* with
* @param failureLabels the failure labels to attach to the task failure cause
* @return {@code true} if the update was successful; otherwise {@code false}
*/
boolean updateTaskExecutionState(
TaskExecutionStateTransition taskExecutionStateTransition,
CompletableFuture<Map<String, String>> failureLabels) {
// collect before updateState, as updateState may deregister the execution
final Optional<AccessExecution> maybeExecution =
executionGraph.findExecution(taskExecutionStateTransition.getID());
final Optional<String> maybeTaskName =
executionGraph.findVertexWithAttempt(taskExecutionStateTransition.getID());
final ExecutionState desiredState = taskExecutionStateTransition.getExecutionState();
boolean successfulUpdate = getExecutionGraph().updateState(taskExecutionStateTransition);
if (successfulUpdate && desiredState == ExecutionState.FAILED) {
final AccessExecution execution =
maybeExecution.orElseThrow(NoSuchElementException::new);
final String taskName = maybeTaskName.orElseThrow(NoSuchElementException::new);
final ExecutionState currentState = execution.getState();
if (currentState == desiredState) {
failureCollection.add(
ExceptionHistoryEntry.create(execution, taskName, failureLabels));
onFailure(
ErrorInfo.handleMissingThrowable(
taskExecutionStateTransition.getError(userCodeClassLoader)));
}
}
return successfulUpdate;
} | 3.68 |
querydsl_ProjectableSQLQuery_addFlag | /**
* Add the given Expression as a query flag
*
* @param position position
* @param flag query flag
* @return the current object
*/
@Override
public Q addFlag(Position position, Expression<?> flag) {
return queryMixin.addFlag(new QueryFlag(position, flag));
} | 3.68 |
hadoop_SinglePendingCommit_getCreated | /**
* When was the upload created?
* @return timestamp
*/
public long getCreated() {
return created;
} | 3.68 |
framework_DateToLongConverter_convertToPresentation | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToPresentation(java.lang
* .Object, java.lang.Class, java.util.Locale)
*/
@Override
public Date convertToPresentation(Long value,
Class<? extends Date> targetType, Locale locale) {
if (targetType != getPresentationType()) {
throw new ConversionException(
"Converter only supports " + getPresentationType().getName()
+ " (targetType was " + targetType.getName() + ")");
}
if (value == null) {
return null;
}
return new Date(value);
} | 3.68 |
hudi_LSMTimeline_listAllMetaFiles | /**
* List all the parquet metadata files.
*/
public static FileStatus[] listAllMetaFiles(HoodieTableMetaClient metaClient) throws IOException {
return metaClient.getFs().globStatus(
new Path(metaClient.getArchivePath() + "/*.parquet"));
} | 3.68 |
flink_AbstractMetricGroup_getQueryServiceMetricInfo | /**
* Returns the metric query service scope for this group.
*
* @param filter character filter
* @return query service scope
*/
public QueryScopeInfo getQueryServiceMetricInfo(CharacterFilter filter) {
if (queryServiceScopeInfo == null) {
queryServiceScopeInfo = createQueryServiceMetricInfo(filter);
}
return queryServiceScopeInfo;
} | 3.68 |
hadoop_RpcNoSuchMethodException_getRpcStatusProto | /**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
} | 3.68 |
hadoop_ReconfigurationTaskStatus_hasTask | /**
* Return true if
* - A reconfiguration task has finished or
* - an active reconfiguration task is running.
* @return true if startTime > 0; false if not.
*/
public boolean hasTask() {
return startTime > 0;
} | 3.68 |
flink_StreamExecutionEnvironment_registerCachedFile | /**
* Registers a file at the distributed cache under the given name. The file will be accessible
* from any user-defined function in the (distributed) runtime under a local path. Files may be
* local files (which will be distributed via BlobServer), or files in a distributed file
* system. The runtime will copy the files temporarily to a local cache, if needed.
*
* <p>The {@link org.apache.flink.api.common.functions.RuntimeContext} can be obtained inside
* UDFs via {@link org.apache.flink.api.common.functions.RichFunction#getRuntimeContext()} and
* provides access {@link org.apache.flink.api.common.cache.DistributedCache} via {@link
* org.apache.flink.api.common.functions.RuntimeContext#getDistributedCache()}.
*
* @param filePath The path of the file, as a URI (e.g. "file:///some/path" or
* "hdfs://host:port/and/path")
* @param name The name under which the file is registered.
* @param executable flag indicating whether the file should be executable
*/
public void registerCachedFile(String filePath, String name, boolean executable) {
this.cacheFile.add(
new Tuple2<>(
name, new DistributedCache.DistributedCacheEntry(filePath, executable)));
} | 3.68 |
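A usage sketch tying registration to retrieval: the file is registered once on the environment and then resolved by name through the DistributedCache inside a rich function; the HDFS path and names are placeholders.

```java
import java.io.File;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CachedFileExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Register once on the environment; the runtime ships a copy to every task manager.
        env.registerCachedFile("hdfs://namenode:8020/config/lookup.csv", "lookup", false);

        env.fromElements("a", "b")
           .map(new RichMapFunction<String, String>() {
               @Override
               public String map(String value) {
                   // Resolve the local copy by the name it was registered under.
                   File lookup = getRuntimeContext().getDistributedCache().getFile("lookup");
                   return value + ":" + lookup.getName();
               }
           })
           .print();
        env.execute("cached-file-example");
    }
}
```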
hudi_BigQuerySchemaResolver_schemaToSqlString | /**
* Converts a BigQuery schema to the string representation used in the BigQuery SQL command to create the manifest based table.
* @param schema The BigQuery schema
* @return The string representation of the schema
*/
public static String schemaToSqlString(Schema schema) {
return fieldsToSqlString(schema.getFields());
} | 3.68 |
hbase_MetricsTableRequests_updatePutBatch | /**
* Update the batch Put time histogram
* @param t time it took
*/
public void updatePutBatch(long t) {
if (isEnableTableLatenciesMetrics()) {
putBatchTimeHistogram.update(t);
}
} | 3.68 |
hmily_Binder_setProperty | /**
* Sets property.
*
* @param property the property
*/
public void setProperty(final ConfigProperty property) {
this.property = property;
} | 3.68 |
hbase_CellCodec_readByteArray | /** Returns the byte array read from the stream. */
private byte[] readByteArray(final InputStream in) throws IOException {
byte[] intArray = new byte[Bytes.SIZEOF_INT];
IOUtils.readFully(in, intArray);
int length = Bytes.toInt(intArray);
byte[] bytes = new byte[length];
IOUtils.readFully(in, bytes);
return bytes;
} | 3.68 |
hbase_QuotaSettingsFactory_unthrottleTableByThrottleType | /**
* Remove the throttling for the specified table.
* @param tableName the table
* @param type the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleTableByThrottleType(final TableName tableName,
final ThrottleType type) {
return throttle(null, tableName, null, null, type, 0, null, QuotaScope.MACHINE);
} | 3.68 |
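A sketch of how such a settings object is typically applied, assuming the usual Admin.setQuota entry point; the throttle type shown (WRITE_NUMBER) is just an example.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class RemoveWriteThrottle {
    /** Removes only the write-number throttle from the given table, leaving other throttle types in place. */
    public static void removeWriteThrottle(Admin admin, TableName table) throws Exception {
        QuotaSettings settings =
            QuotaSettingsFactory.unthrottleTableByThrottleType(table, ThrottleType.WRITE_NUMBER);
        admin.setQuota(settings);   // applies the removal cluster-side
    }
}
```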
hudi_CompactNode_execute | /**
* Method helps to start the compact operation. It will compact the last pending compact instant in the timeline
* if it has one.
*
* @param executionContext Execution context to run this compaction
* @param curItrCount cur iteration count.
* @throws Exception will be thrown if any error occurred.
*/
@Override
public void execute(ExecutionContext executionContext, int curItrCount) throws Exception {
HoodieTableMetaClient metaClient =
HoodieTableMetaClient.builder().setConf(executionContext.getHoodieTestSuiteWriter().getConfiguration()).setBasePath(executionContext.getHoodieTestSuiteWriter().getCfg().targetBasePath)
.build();
Option<HoodieInstant> lastInstant = metaClient.getActiveTimeline()
.getWriteTimeline().filterPendingCompactionTimeline().lastInstant();
if (lastInstant.isPresent()) {
log.info("Compacting instant {}", lastInstant.get());
this.result = executionContext.getHoodieTestSuiteWriter().compact(Option.of(lastInstant.get().getTimestamp()));
executionContext.getHoodieTestSuiteWriter().commitCompaction(result, executionContext.getJsc().emptyRDD(), Option.of(lastInstant.get().getTimestamp()));
}
} | 3.68 |
framework_BinderValidationStatus_getValidationErrors | /**
* Gets both field and bean level validation errors.
*
* @return a list of all validation errors
*/
public List<ValidationResult> getValidationErrors() {
List<ValidationResult> errors = getFieldValidationErrors().stream()
.map(s -> s.getResult().get()).collect(Collectors.toList());
errors.addAll(getBeanValidationErrors());
return errors;
} | 3.68 |
hadoop_NMClientAsync_onContainerRestartError | /**
* Error Callback for container restart.
*
* @param containerId the Id of the container to restart.
* @param t a Throwable.
*
*/
public void onContainerRestartError(ContainerId containerId, Throwable t) {} | 3.68 |
hbase_HDFSBlocksDistribution_add | /**
* This will add the distribution from input to this object
* @param otherBlocksDistribution the other hdfs blocks distribution
*/
public void add(HDFSBlocksDistribution otherBlocksDistribution) {
Map<String, HostAndWeight> otherHostAndWeights = otherBlocksDistribution.getHostAndWeights();
for (Map.Entry<String, HostAndWeight> otherHostAndWeight : otherHostAndWeights.entrySet()) {
addHostAndBlockWeight(otherHostAndWeight.getValue().host,
otherHostAndWeight.getValue().weight, otherHostAndWeight.getValue().weightForSsd);
}
addUniqueWeight(otherBlocksDistribution.getUniqueBlocksTotalWeight());
} | 3.68 |
flink_TernaryBoolean_fromBoolean | /**
* Converts the given boolean to a TernaryBoolean, {@link #TRUE} or {@link #FALSE} respectively.
*/
public static TernaryBoolean fromBoolean(boolean bool) {
return bool ? TRUE : FALSE;
} | 3.68 |
flink_SecurityUtils_install | /**
* Installs a process-wide security configuration.
*
* <p>Applies the configuration using the available security modules (i.e. Hadoop, JAAS).
*/
public static void install(SecurityConfiguration config) throws Exception {
// Install the security modules first before installing the security context
installModules(config);
installContext(config);
} | 3.68 |
Activiti_TreeMethodExpression_dump | /**
* Print the parse tree.
* @param writer
*/
public void dump(PrintWriter writer) {
NodePrinter.dump(writer, node);
} | 3.68 |
morf_SchemaHomology_checkPrimaryKeys | /**
* Checks the ordering of the primary keys.
*/
private void checkPrimaryKeys(String tableName, List<String> table1UpperCaseKeys, List<String> table2UpperCaseKeys) {
if (table1UpperCaseKeys.size() != table2UpperCaseKeys.size()) {
difference(String.format("Primary key column count on table [%s] does not match. Column are [%s] and [%s]", tableName, table1UpperCaseKeys, table2UpperCaseKeys));
return;
}
for (int i = 0 ; i < table1UpperCaseKeys.size(); i++) {
if (!StringUtils.equals(table1UpperCaseKeys.get(i), table2UpperCaseKeys.get(i))) {
difference(String.format("Primary key at index [%d] on table [%s] does not match. Columns are [%s] and [%s]", i, tableName, table1UpperCaseKeys.get(i), table2UpperCaseKeys.get(i)));
}
}
} | 3.68 |
framework_GwtRpcButtonConnector_showResult | /*
* Show the result box.
*/
private void showResult(String result, String labelID) {
DialogBox box = new DialogBox(true);
Label label = new Label(result);
label.getElement().setId(labelID);
box.add(label);
box.center();
box.show();
} | 3.68 |
streampipes_SpTrajectoryBuilder_createSingleTrajectoryCoordinate | /**
* Creates a Coordinate object with X, Y and M Value to be stored later directly in the trajectory
* object. Should always be used when adding a subpoint to the trajectory list
*
* @param geom Point geometry, which coordinates will be added to the trajectory list
* @param m Double M value, which will be used to store as extra parameter in the trajectory list
* for additional calculations
* @return CoordinateXYM coordinate object
*/
private Coordinate createSingleTrajectoryCoordinate(Point geom, Double m) {
CoordinateXYM coordinate = new CoordinateXYM((geom.getX()), geom.getY(), m);
return coordinate;
} | 3.68 |
AreaShop_Utils_createCommaSeparatedList | /**
* Create a comma-separated list.
* @param input Collection of objects which should be concatenated with commas in between (skipping null values)
* @return Input objects concatenated with commas in between
*/
public static String createCommaSeparatedList(Collection<?> input) {
StringBuilder result = new StringBuilder();
boolean first = true;
for(Object object : input) {
if(object != null) {
if(first) {
first = false;
result.append(object.toString());
} else {
result.append(", ").append(object.toString());
}
}
}
return result.toString();
} | 3.68 |
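A tiny demonstration derived directly from the loop above (nulls skipped, items joined with ", "):

```java
import java.util.Arrays;
import java.util.List;

public class CommaListDemo {
    public static void main(String[] args) {
        List<String> regions = Arrays.asList("greenhouse", null, "market");
        // Utils is the AreaShop helper shown above; nulls are skipped -> "greenhouse, market"
        System.out.println(Utils.createCommaSeparatedList(regions));
    }
}
```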
AreaShop_GeneralRegion_isLandlord | /**
* Check if the specified player is the landlord of this region.
* @param landlord The UUID of the player to check for landlord
* @return true if the player is the landlord, otherwise false
*/
public boolean isLandlord(UUID landlord) {
return landlord != null && getLandlord() != null && getLandlord().equals(landlord);
} | 3.68 |
hadoop_HAServiceTarget_supportObserver | /**
* @return true if this target supports the Observer state, false otherwise.
*/
public boolean supportObserver() {
return false;
} | 3.68 |
framework_FieldGroup_getItemDataSource | /**
* Gets the item used by this FieldBinder. Note that you must call
* {@link #commit()} for the item to be updated unless buffered mode has
* been switched off.
*
* @see #setBuffered(boolean)
* @see #commit()
*
* @return The item used by this FieldBinder
*/
public Item getItemDataSource() {
return itemDataSource;
} | 3.68 |
flink_BooleanColumnSummary_getNonNullCount | /** The number of non-null values in this column. */
@Override
public long getNonNullCount() {
return trueCount + falseCount;
} | 3.68 |
hadoop_QueueAclsInfo_getOperations | /**
* Get operations allowed on queue.
*
* @return array of String
*/
public String[] getOperations() {
return operations;
} | 3.68 |
flink_ExecNodeContext_newPersistedConfig | /**
* Create a configuration for the {@link ExecNode}, ready to be persisted to a JSON plan.
*
* @param execNodeClass The {@link ExecNode} class.
* @param tableConfig The planner configuration (include the {@link TableConfig}).
* @return The {@link ExecNode} configuration, which contains the consumed options for the node,
* defined by {@link ExecNodeMetadata#consumedOptions()}, along with their values.
*/
public static <T extends ExecNode<?>> ReadableConfig newPersistedConfig(
Class<T> execNodeClass, ReadableConfig tableConfig) {
return ExecNodeMetadataUtil.newPersistedConfig(
execNodeClass,
tableConfig,
Stream.concat(
ExecNodeMetadataUtil.TABLE_CONFIG_OPTIONS.stream(),
ExecNodeMetadataUtil.EXECUTION_CONFIG_OPTIONS.stream()));
} | 3.68 |
framework_Tree_setSelectable | /**
* Sets the selectable state. Selectable determines if a node can be
* selected on the client side. Selectable does not affect
* {@link #setValue(Object)} or {@link #select(Object)}.
*
* <p>
* The tree is selectable by default.
* </p>
*
* @param selectable
* The new selectable state.
*/
public void setSelectable(boolean selectable) {
if (this.selectable != selectable) {
this.selectable = selectable;
markAsDirty();
}
} | 3.68 |
flink_HiveParserQB_getAppendedAliasFromId | // For sub-queries, the id. and alias should be appended since same aliases can be re-used
// within different sub-queries.
// For a query like:
// select ...
// (select * from T1 a where ...) subq1
// join
// (select * from T2 a where ...) subq2
// ..
// the alias is modified to subq1:a and subq2:a from a, to identify the right sub-query.
private static String getAppendedAliasFromId(String outerId, String alias) {
return (outerId == null ? alias : outerId + ":" + alias);
} | 3.68 |
hudi_HoodieBackedTableMetadata_getOrCreateReaders | /**
* Create a file reader and the record scanner for a given partition and file slice
* if readers are not already available.
*
* @param partitionName - Partition name
* @param slice - The file slice to open readers for
* @return File reader and the record scanner pair for the requested file slice
*/
private Pair<HoodieSeekingFileReader<?>, HoodieMetadataLogRecordReader> getOrCreateReaders(String partitionName, FileSlice slice) {
if (reuse) {
Pair<String, String> key = Pair.of(partitionName, slice.getFileId());
return partitionReaders.get().computeIfAbsent(key, ignored -> openReaders(partitionName, slice));
} else {
return openReaders(partitionName, slice);
}
} | 3.68 |
cron-utils_FieldDayOfWeekDefinitionBuilder_and | /**
* Registers CronField in ParserDefinitionBuilder and returns its instance.
*
* @return ParserDefinitionBuilder instance obtained from constructor
*/
@Override
public CronDefinitionBuilder and() {
final boolean zeroInRange = constraints.createConstraintsInstance().isInRange(0);
cronDefinitionBuilder
.register(new DayOfWeekFieldDefinition(fieldName, constraints.createConstraintsInstance(), optional, new WeekDay(mondayDoWValue, zeroInRange)));
return cronDefinitionBuilder;
} | 3.68 |
hudi_FlinkMergeAndReplaceHandle_newFileNameWithRollover | /**
* Use the writeToken + "-" + rollNumber as the new writeToken of a mini-batch write.
*/
protected String newFileNameWithRollover(int rollNumber) {
return FSUtils.makeBaseFileName(instantTime, writeToken + "-" + rollNumber,
this.fileId, hoodieTable.getBaseFileExtension());
} | 3.68 |
flink_FactoryUtil_checkWatermarkOptions | /**
* Check watermark-related options and return error messages.
*
* @param conf table options
* @return Optional of error messages
*/
public static Optional<String> checkWatermarkOptions(ReadableConfig conf) {
// try to validate watermark options by parsing it
watermarkOptionSet.forEach(option -> readOption(conf, option));
// check watermark alignment options
Optional<String> groupOptional = conf.getOptional(WATERMARK_ALIGNMENT_GROUP);
Optional<Duration> maxDriftOptional = conf.getOptional(WATERMARK_ALIGNMENT_MAX_DRIFT);
Optional<Duration> updateIntervalOptional =
conf.getOptional(WATERMARK_ALIGNMENT_UPDATE_INTERVAL);
if ((groupOptional.isPresent()
|| maxDriftOptional.isPresent()
|| updateIntervalOptional.isPresent())
&& (!groupOptional.isPresent() || !maxDriftOptional.isPresent())) {
String errMsg =
String.format(
"'%s' and '%s' must be set when configuring watermark alignment",
WATERMARK_ALIGNMENT_GROUP.key(), WATERMARK_ALIGNMENT_MAX_DRIFT.key());
return Optional.of(errMsg);
}
return Optional.empty();
} | 3.68 |
framework_VScrollTable_getAlign | /**
* Get the alignment of the text in the cell.
*
* @return Returns either ALIGN_CENTER, ALIGN_LEFT or ALIGN_RIGHT
*/
public char getAlign() {
return align;
} | 3.68 |
framework_AbstractClientConnector_createState | /**
* Creates the shared state bean to be used in server to client
* communication.
* <p>
* By default a state object of the defined return type of
* {@link #getState()} is created. Subclasses can override this method and
* return a new instance of the correct state class but this should rarely
* be necessary.
* </p>
* <p>
* No configuration of the values of the state should be performed in
* {@link #createState()}.
*
* @since 7.0
*
* @return new shared state object
*/
protected SharedState createState() {
try {
return ReflectTools.createInstance(getStateType());
} catch (Exception e) {
throw new RuntimeException("Error creating state of type "
+ getStateType().getName() + " for " + getClass().getName(),
e);
}
} | 3.68 |
flink_MathUtils_isPowerOf2 | /**
* Checks whether the given value is a power of two.
*
* @param value The value to check.
* @return True, if the value is a power of two, false otherwise.
*/
public static boolean isPowerOf2(long value) {
return (value & (value - 1)) == 0;
} | 3.68 |
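The check relies on the classic n & (n - 1) trick: a power of two has exactly one set bit, so clearing its lowest set bit yields zero. As written, the method also reports 0 as a power of two, which callers typically guard against; a tiny standalone demonstration with that guard added:

```java
public class PowerOfTwoDemo {
    // Same bit trick as above, with an explicit guard for zero.
    static boolean isPowerOf2Strict(long value) {
        return value > 0 && (value & (value - 1)) == 0;
    }

    public static void main(String[] args) {
        for (long v : new long[]{0, 1, 2, 3, 4, 1023, 1024}) {
            System.out.println(v + " -> " + isPowerOf2Strict(v));
        }
    }
}
```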