name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
framework_BasicWeekClickHandler_weekClick | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.ui.CalendarComponentEvents.WeekClickHandler
* #weekClick
* (com.vaadin.addon.calendar.ui.CalendarComponentEvents.WeekClick)
*/
@Override
public void weekClick(WeekClick event) {
int week = event.getWeek();
int year = event.getYear();
// set correct year and month
Calendar javaCalendar = event.getComponent().getInternalCalendar();
javaCalendar.set(GregorianCalendar.YEAR, year);
javaCalendar.set(GregorianCalendar.WEEK_OF_YEAR, week);
// starting at the beginning of the week
javaCalendar.set(GregorianCalendar.DAY_OF_WEEK,
javaCalendar.getFirstDayOfWeek());
Date start = javaCalendar.getTime();
// ending at the end of the week
javaCalendar.add(GregorianCalendar.DATE, 6);
Date end = javaCalendar.getTime();
setDates(event, start, end);
// times are automatically expanded, no need to worry about them
} | 3.68 |
hibernate-validator_ValidatorImpl_getValueContextForValueValidation | /**
* Returns a value context pointing to the given property path relative to the specified root class without a value.
* <p>
* We are only able to use the static types as we don't have the value.
* </p>
*
* @param rootBeanClass The class of the root bean.
* @param propertyPath The property path for which constraints have to be collected.
* @return Returns an instance of {@code ValueContext} which describes the local validation context associated to
* the given property path.
*/
private <V> BeanValueContext<?, V> getValueContextForValueValidation(Class<?> rootBeanClass,
PathImpl propertyPath) {
Class<?> clazz = rootBeanClass;
BeanMetaData<?> beanMetaData = null;
PropertyMetaData propertyMetaData = null;
Iterator<Path.Node> propertyPathIter = propertyPath.iterator();
while ( propertyPathIter.hasNext() ) {
// cast is ok, since we are dealing with engine internal classes
NodeImpl propertyPathNode = (NodeImpl) propertyPathIter.next();
beanMetaData = beanMetaDataManager.getBeanMetaData( clazz );
propertyMetaData = getBeanPropertyMetaData( beanMetaData, propertyPathNode );
// if the property is not the leaf property, we set up the context for the next iteration
if ( propertyPathIter.hasNext() ) {
// if we are in the case of an iterable and we want to validate an element of this iterable, we have to get the
// type from the parameterized type
if ( propertyPathNode.isIterable() ) {
propertyPathNode = (NodeImpl) propertyPathIter.next();
clazz = ReflectionHelper.getClassFromType( ReflectionHelper.getCollectionElementType( propertyMetaData.getType() ) );
beanMetaData = beanMetaDataManager.getBeanMetaData( clazz );
propertyMetaData = getBeanPropertyMetaData( beanMetaData, propertyPathNode );
}
else {
clazz = ReflectionHelper.getClassFromType( propertyMetaData.getType() );
}
}
}
if ( propertyMetaData == null ) {
// should only happen if the property path is empty, which should never happen
throw LOG.getInvalidPropertyPathException( clazz, propertyPath.asString() );
}
propertyPath.removeLeafNode();
return ValueContexts.getLocalExecutionContextForValueValidation( validatorScopedContext.getParameterNameProvider(), beanMetaData, propertyPath );
} | 3.68 |
hadoop_FedBalanceContext_setDelayDuration | /**
* Specify the delay duration used when the procedures need to retry.
* @param value the delay duration.
* @return the builder.
*/
public Builder setDelayDuration(long value) {
this.delayDuration = value;
return this;
} | 3.68 |
hbase_SimpleRpcServer_getListenerAddress | /**
* Return the socket (ip+port) on which the RPC server is listening. May return null if the
* listener channel is closed.
* @return the socket (ip+port) on which the RPC server is listening, or null if this
* information cannot be determined
*/
@Override
public synchronized InetSocketAddress getListenerAddress() {
if (listener == null) {
return null;
}
return listener.getAddress();
} | 3.68 |
druid_ListDG_topologicalSort | /*
* Topological sort.
*
* Return value:
* true the sort succeeded and the result is written to the output array
* false the sort failed (the directed graph contains a cycle)
*/
public boolean topologicalSort(Object[] tops) {
int index = 0;
int num = mVexs.size();
int[] ins; // in-degree array
//Object[] tops; // result array of the topological sort; records each vertex's position after sorting.
Queue<Integer> queue; // auxiliary queue
ins = new int[num];
//tops = new Object[num];
queue = new LinkedList<Integer>();
// count the in-degree of every vertex
for (int i = 0; i < num; i++) {
ENode node = mVexs.get(i).firstEdge;
while (node != null) {
ins[node.ivex]++;
node = node.nextEdge;
}
}
// enqueue all vertices whose in-degree is 0
for (int i = 0; i < num; i++) {
if (ins[i] == 0) {
queue.offer(i); // enqueue
}
}
while (!queue.isEmpty()) { // while the queue is not empty
int j = queue.poll().intValue(); // dequeue; j is the vertex index
tops[index++] = mVexs.get(j).data; // append the vertex to tops, which holds the sorted result
ENode node = mVexs.get(j).firstEdge; // get the outgoing edge list of this vertex
// decrement the in-degree of every vertex adjacent to "node";
// if the in-degree drops to 0 afterwards, add that vertex to the queue.
while (node != null) {
// decrement the in-degree of the vertex with index node.ivex
ins[node.ivex]--;
// if its in-degree is now 0, enqueue it
if (ins[node.ivex] == 0) {
queue.offer(node.ivex); // enqueue
}
node = node.nextEdge;
}
}
if (index != num) {
return false;
}
return true;
} | 3.68 |
framework_ScrollbarBundle_setLocked | /**
* Locks or unlocks the scrollbar bundle.
* <p>
* A locked scrollbar bundle will refuse to scroll, both programmatically
* and via user-triggered events.
*
* @param isLocked
* <code>true</code> to lock, <code>false</code> to unlock
*/
public void setLocked(boolean isLocked) {
this.isLocked = isLocked;
} | 3.68 |
hudi_MetadataCommand_setMetadataBaseDirectory | /**
* Sets the directory to store/read Metadata Table.
* <p>
* This can be used to store the metadata table away from the dataset directory.
* - Useful for testing as well as for using via the HUDI CLI so that the actual dataset is not written to.
* - Useful for testing Metadata Table performance and operations on existing datasets before enabling.
*/
public static void setMetadataBaseDirectory(String metadataDir) {
ValidationUtils.checkState(metadataBaseDirectory == null,
"metadataBaseDirectory is already set to " + metadataBaseDirectory);
metadataBaseDirectory = metadataDir;
} | 3.68 |
hbase_Procedure_getProcId | // ==========================================================================
// Those fields are unchanged after initialization.
//
// Each procedure will get created from the user or during
// ProcedureExecutor.start() during the load() phase and then submitted
// to the executor. these fields will never be changed after initialization
// ==========================================================================
public long getProcId() {
return procId;
} | 3.68 |
querydsl_QueryBase_orderBy | /**
* Add order expressions
*
* @param o order
* @return the current object
*/
public Q orderBy(OrderSpecifier<?>... o) {
return queryMixin.orderBy(o);
} | 3.68 |
framework_VaadinService_getApplicationUrl | /**
* Get the base URL that should be used for sending requests back to this
* service.
* <p>
* This is only used to support legacy cases.
*
* @param request
* @return
* @throws MalformedURLException
*
* @deprecated As of 7.0. Only used to support {@link LegacyApplication}.
*/
@Deprecated
protected URL getApplicationUrl(VaadinRequest request)
throws MalformedURLException {
return null;
} | 3.68 |
flink_Execution_triggerSynchronousSavepoint | /**
* Trigger a new checkpoint on the task of this execution.
*
* @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
* @return Future acknowledge which is returned once the checkpoint has been triggered
*/
public CompletableFuture<Acknowledge> triggerSynchronousSavepoint(
long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
return triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions);
} | 3.68 |
framework_VAbstractCalendarPanel_onValueChange | /**
* @deprecated This method is not used by the framework code anymore.
* @return {@code false}
*/
@Deprecated
protected boolean onValueChange() {
return false;
} | 3.68 |
hbase_Procedure_setChildrenLatch | /**
* Called by the ProcedureExecutor on procedure-load to restore the latch state
*/
protected synchronized void setChildrenLatch(int numChildren) {
this.childrenLatch = numChildren;
if (LOG.isTraceEnabled()) {
LOG.trace("CHILD LATCH INCREMENT SET " + this.childrenLatch, new Throwable(this.toString()));
}
} | 3.68 |
querydsl_BeanMap_createWriteMethodArguments | /**
* Creates an array of parameters to pass to the given mutator method.
* If the given object is not the right type to pass to the method
* directly, it will be converted using {@link #convertType(Class,Object)}.
*
* @param method the mutator method
* @param value the value to pass to the mutator method
* @return an array containing one object that is either the given value
* or a transformed value
* @throws IllegalAccessException if {@link #convertType(Class,Object)}
* raises it
* @throws IllegalArgumentException if any other exception is raised
* by {@link #convertType(Class,Object)}
*/
protected Object[] createWriteMethodArguments(Method method, Object value) throws IllegalAccessException {
try {
if (value != null) {
Class<?>[] types = method.getParameterTypes();
if (types != null && types.length > 0) {
Class<?> paramType = types[0];
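// Box primitive parameter types so the assignability check below works with the value's wrapper class.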
if (paramType.isPrimitive()) {
paramType = PrimitiveUtils.wrap(paramType);
}
if (!paramType.isAssignableFrom(value.getClass())) {
value = convertType(paramType, value);
}
}
}
return new Object[]{value};
} catch (InvocationTargetException | InstantiationException e) {
throw new IllegalArgumentException(e.getMessage());
}
} | 3.68 |
querydsl_MathExpressions_exp | /**
* Create a {@code exp(num)} expression
*
* <p>Returns the base-e exponential function of num, which is e raised to the power num.</p>
*
* @param num numeric expression
* @return exp(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> exp(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.EXP, num);
} | 3.68 |
framework_MouseEvents_getButtonName | /**
* Returns a human readable string representing which button has been
* pushed. This is meant for debug purposes only and the string returned
* could change. Use {@link #getButton()} to check which button was
* pressed.
*
* @since 6.3
* @return A string representation of which button was pushed.
*/
public String getButtonName() {
return details.getButtonName();
} | 3.68 |
framework_ServletIntegrationWebsocketUI_init | /*
* (non-Javadoc)
*
* @see
* com.vaadin.tests.integration.IntegrationTestUI#init(com.vaadin.server
* .VaadinRequest)
*/
@Override
protected void init(VaadinRequest request) {
super.init(request);
// Ensure no fallback is used
getPushConfiguration().setParameter(
PushConfigurationState.FALLBACK_TRANSPORT_PARAM, "none");
} | 3.68 |
hadoop_AuxiliaryService_getRecoveryPath | /**
* Get the path specific to this auxiliary service to use for recovery.
*
* @return state storage path or null if recovery is not enabled
*/
protected Path getRecoveryPath() {
return recoveryPath;
} | 3.68 |
hudi_RDDConsistentBucketBulkInsertPartitioner_repartitionRecords | /**
* Repartition the records to conform the bucket index storage layout constraints.
* Specifically, partition the records based on consistent bucket index, which is computed
* using hashing metadata and records' key.
*
* @param records Input Hoodie records
* @param outputSparkPartitions Not used, the actual parallelism is determined by the bucket number
* @return partitioned records, each partition of data corresponds to a bucket (i.e., file group)
*/
@Override
public JavaRDD<HoodieRecord<T>> repartitionRecords(JavaRDD<HoodieRecord<T>> records, int outputSparkPartitions) {
Map<String, ConsistentBucketIdentifier> partitionToIdentifier = initializeBucketIdentifier(records);
Map<String, Map<String, Integer>> partitionToFileIdPfxIdxMap = generateFileIdPfx(partitionToIdentifier);
return doPartition(records, new Partitioner() {
@Override
public int numPartitions() {
return fileIdPfxList.size();
}
@Override
public int getPartition(Object key) {
HoodieKey hoodieKey = (HoodieKey) key;
String partition = hoodieKey.getPartitionPath();
ConsistentHashingNode node = partitionToIdentifier.get(partition).getBucket(hoodieKey, indexKeyFields);
return partitionToFileIdPfxIdxMap.get(partition).get(node.getFileIdPrefix());
}
});
} | 3.68 |
cron-utils_CronDefinitionBuilder_withSupportedNicknameMidnight | /**
* Supports cron nickname @midnight
*
* @return this CronDefinitionBuilder instance
*/
public CronDefinitionBuilder withSupportedNicknameMidnight() {
cronNicknames.add(CronNicknames.MIDNIGHT);
return this;
} | 3.68 |
hbase_User_createUserForTesting | /**
* Create a user for testing.
* @see User#createUserForTesting(org.apache.hadoop.conf.Configuration, String, String[])
*/
public static User createUserForTesting(Configuration conf, String name, String[] groups) {
synchronized (UserProvider.class) {
if (!(UserProvider.groups instanceof TestingGroups)) {
UserProvider.groups = new TestingGroups(UserProvider.groups);
}
}
((TestingGroups) UserProvider.groups).setUserGroups(name, groups);
return new SecureHadoopUser(UserGroupInformation.createUserForTesting(name, groups));
} | 3.68 |
hadoop_ZKSignerSecretProvider_destroy | /**
* Disconnects from ZooKeeper unless told not to.
*/
@Override
public void destroy() {
if (shouldDisconnect && client != null) {
client.close();
}
super.destroy();
} | 3.68 |
hbase_RpcExecutor_getMethodName | /**
* Return the {@link Descriptors.MethodDescriptor#getName()} from {@code callRunner} or "Unknown".
*/
private static String getMethodName(final CallRunner callRunner) {
return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getMethod)
.map(Descriptors.MethodDescriptor::getName).orElse("Unknown");
} | 3.68 |
framework_VComboBox_selectPrevPage | /*
* Show the prev page.
*/
private void selectPrevPage() {
if (currentPage > 0) {
dataReceivedHandler.setNavigationCallback(
() -> suggestionPopup.selectLastItem());
filterOptions(currentPage - 1, lastFilter);
}
} | 3.68 |
hadoop_AbfsConfiguration_getPasswordString | /**
* Returns the account-specific password in string form if it exists, then
* looks for an account-agnostic value.
* @param key Account-agnostic configuration key
* @return value in String form if one exists, else null
* @throws IOException
*/
public String getPasswordString(String key) throws IOException {
char[] passchars = rawConfig.getPassword(accountConf(key));
if (passchars == null) {
passchars = rawConfig.getPassword(key);
}
if (passchars != null) {
return new String(passchars);
}
return null;
} | 3.68 |
flink_TableFunctionCollector_getInput | /**
* Gets the input value from left table, which will be used to cross join with the result of
* table function.
*/
public Object getInput() {
return input;
} | 3.68 |
flink_TypeInferenceOperandChecker_updateInferredType | /** Adopted from {@link org.apache.calcite.sql.validate.implicit.AbstractTypeCoercion}. */
private void updateInferredType(SqlValidator validator, SqlNode node, RelDataType type) {
validator.setValidatedNodeType(node, type);
final SqlValidatorNamespace namespace = validator.getNamespace(node);
if (namespace != null) {
namespace.setType(type);
}
} | 3.68 |
hadoop_RunnableCallable_run | /**
* Invokes the wrapped callable/runnable as a runnable.
*
* @throws RuntimeException thrown by the wrapped callable/runnable invocation.
*/
@Override
public void run() {
if (runnable != null) {
runnable.run();
} else {
try {
callable.call();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
} | 3.68 |
hadoop_EntityGroupFSTimelineStoreMetrics_incrGetEntityToSummaryOps | // Setters
// General read related
public void incrGetEntityToSummaryOps() {
getEntityToSummaryOps.incr();
} | 3.68 |
hbase_ReplicationPeerConfigUtil_parsePeerFrom | /**
* Parse the serialized representation of a peer configuration.
* @param bytes Content of a peer znode.
* @return The replication peer configuration parsed from the passed bytes.
* @throws DeserializationException deserialization exception
*/
public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pbLen = ProtobufUtil.lengthOfPBMagic();
ReplicationProtos.ReplicationPeer.Builder builder =
ReplicationProtos.ReplicationPeer.newBuilder();
ReplicationProtos.ReplicationPeer peer;
try {
ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen);
peer = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(peer);
} else {
if (bytes == null || bytes.length <= 0) {
throw new DeserializationException("Bytes to deserialize should not be empty.");
}
return ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
}
} | 3.68 |
pulsar_ProducerImpl_verifyLocalBufferIsNotCorrupted | /**
* Computes checksum again and verifies it against existing checksum. If checksum doesn't match it means that
* message is corrupt.
*
* @param op
* @return true if the message is unmodified and the computed checksum matches the stored checksum, or if no
* checksum is present; false if the message is corrupted.
*/
protected boolean verifyLocalBufferIsNotCorrupted(OpSendMsg op) {
ByteBufPair msg = op.cmd;
if (msg != null) {
ByteBuf headerFrame = msg.getFirst();
headerFrame.markReaderIndex();
try {
// skip bytes up to checksum index
headerFrame.skipBytes(4); // skip [total-size]
int cmdSize = (int) headerFrame.readUnsignedInt();
headerFrame.skipBytes(cmdSize);
// verify if checksum present
if (hasChecksum(headerFrame)) {
int checksum = readChecksum(headerFrame);
// msg.readerIndex is already at header-payload index, Recompute checksum for headers-payload
int metadataChecksum = computeChecksum(headerFrame);
long computedChecksum = resumeChecksum(metadataChecksum, msg.getSecond());
return checksum == computedChecksum;
} else {
log.warn("[{}] [{}] checksum is not present into message with id {}", topic, producerName,
op.sequenceId);
}
} finally {
headerFrame.resetReaderIndex();
}
return true;
} else {
log.warn("[{}] Failed while casting empty ByteBufPair, ", producerName);
return false;
}
} | 3.68 |
hbase_ParseFilter_popArguments | /**
* Pops an operator from the operator stack and the number of filters required by that operator
* from the filter stack, and evaluates them
* <p>
* @param operatorStack the stack containing the operators
* @param filterStack the stack containing the filters
* @return the evaluated filter
*/
public static Filter popArguments(Stack<ByteBuffer> operatorStack, Stack<Filter> filterStack) {
ByteBuffer argumentOnTopOfStack = operatorStack.peek();
if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) {
// The top of the stack is an OR
try {
ArrayList<Filter> listOfFilters = new ArrayList<>();
while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) {
Filter filter = filterStack.pop();
listOfFilters.add(0, filter);
operatorStack.pop();
}
Filter filter = filterStack.pop();
listOfFilters.add(0, filter);
Filter orFilter = new FilterList(FilterList.Operator.MUST_PASS_ONE, listOfFilters);
return orFilter;
} catch (EmptyStackException e) {
throw new IllegalArgumentException("Incorrect input string - an OR needs two filters");
}
} else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) {
// The top of the stack is an AND
try {
ArrayList<Filter> listOfFilters = new ArrayList<>();
while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) {
Filter filter = filterStack.pop();
listOfFilters.add(0, filter);
operatorStack.pop();
}
Filter filter = filterStack.pop();
listOfFilters.add(0, filter);
Filter andFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, listOfFilters);
return andFilter;
} catch (EmptyStackException e) {
throw new IllegalArgumentException("Incorrect input string - an AND needs two filters");
}
} else if (argumentOnTopOfStack.equals(ParseConstants.SKIP_BUFFER)) {
// The top of the stack is a SKIP
try {
Filter wrappedFilter = filterStack.pop();
Filter skipFilter = new SkipFilter(wrappedFilter);
operatorStack.pop();
return skipFilter;
} catch (EmptyStackException e) {
throw new IllegalArgumentException("Incorrect input string - a SKIP wraps a filter");
}
} else if (argumentOnTopOfStack.equals(ParseConstants.WHILE_BUFFER)) {
// The top of the stack is a WHILE
try {
Filter wrappedFilter = filterStack.pop();
Filter whileMatchFilter = new WhileMatchFilter(wrappedFilter);
operatorStack.pop();
return whileMatchFilter;
} catch (EmptyStackException e) {
throw new IllegalArgumentException("Incorrect input string - a WHILE wraps a filter");
}
} else if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
// The top of the stack is a LPAREN
try {
Filter filter = filterStack.pop();
operatorStack.pop();
return filter;
} catch (EmptyStackException e) {
throw new IllegalArgumentException("Incorrect Filter String");
}
} else {
throw new IllegalArgumentException("Incorrect arguments on operatorStack");
}
} | 3.68 |
flink_TaskExecutionState_getExecutionState | /**
* Returns the new execution state of the task.
*
* @return the new execution state of the task
*/
public ExecutionState getExecutionState() {
return this.executionState;
} | 3.68 |
hbase_Bytes_vintToBytes | /**
* Encode a long value as a variable length integer.
* @param vint Integer to make a vint of.
* @return Vint as bytes array.
*/
public static byte[] vintToBytes(final long vint) {
long i = vint;
int size = WritableUtils.getVIntSize(i);
byte[] result = new byte[size];
int offset = 0;
if (i >= -112 && i <= 127) {
result[offset] = (byte) i;
return result;
}
int len = -112;
if (i < 0) {
i ^= -1L; // take one's complement'
len = -120;
}
long tmp = i;
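// Determine how many payload bytes are needed by shifting the value down one byte at a time.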
while (tmp != 0) {
tmp = tmp >> 8;
len--;
}
result[offset++] = (byte) len;
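// Recover the payload byte count from the length marker, then write the value bytes most-significant first.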
len = (len < -120) ? -(len + 120) : -(len + 112);
for (int idx = len; idx != 0; idx--) {
int shiftbits = (idx - 1) * 8;
long mask = 0xFFL << shiftbits;
result[offset++] = (byte) ((i & mask) >> shiftbits);
}
return result;
} | 3.68 |
flink_LimitedConnectionsFileSystem_getNumberOfOpenOutputStreams | /** Gets the number of currently open output streams. */
public int getNumberOfOpenOutputStreams() {
lock.lock();
try {
return numReservedOutputStreams;
} finally {
lock.unlock();
}
} | 3.68 |
querydsl_StringExpression_lower | /**
* Create a {@code this.toLowerCase()} expression
*
* <p>Get the lower case form</p>
*
* @return this.toLowerCase()
* @see java.lang.String#toLowerCase()
*/
public StringExpression lower() {
if (lower == null) {
lower = Expressions.stringOperation(Ops.LOWER, mixin);
}
return lower;
} | 3.68 |
hudi_FileSystemViewManager_createRocksDBBasedFileSystemView | /**
* Create RocksDB based file System view for a table.
*
* @param conf Hadoop Configuration
* @param viewConf View Storage Configuration
* @param metaClient HoodieTableMetaClient
* @return
*/
private static RocksDbBasedFileSystemView createRocksDBBasedFileSystemView(SerializableConfiguration conf,
FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient) {
HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants();
return new RocksDbBasedFileSystemView(metaClient, timeline, viewConf);
} | 3.68 |
flink_AbstractKeyedStateBackend_applyToAllKeys | /** @see KeyedStateBackend */
@Override
public <N, S extends State, T> void applyToAllKeys(
final N namespace,
final TypeSerializer<N> namespaceSerializer,
final StateDescriptor<S, T> stateDescriptor,
final KeyedStateFunction<K, S> function)
throws Exception {
applyToAllKeys(
namespace,
namespaceSerializer,
stateDescriptor,
function,
this::getPartitionedState);
} | 3.68 |
hadoop_ManifestSuccessData_setFilenamePaths | /**
* Set the list of filename paths.
*/
@JsonIgnore
public void setFilenamePaths(List<Path> paths) {
setFilenames(new ArrayList<>(
paths.stream()
.map(AbstractManifestData::marshallPath)
.collect(Collectors.toList())));
} | 3.68 |
framework_VTree_getLastRootNode | /**
* Returns the last root node of the tree or null if there are no root
* nodes.
*
* @return The last root {@link TreeNode}
*/
protected TreeNode getLastRootNode() {
if (body.getWidgetCount() == 0) {
return null;
}
return (TreeNode) body.getWidget(body.getWidgetCount() - 1);
} | 3.68 |
framework_Form_attachField | /**
* Adds the field to the form layout.
* <p>
* The field is added to the form layout in the default position (the
* position used by {@link Layout#addComponent(Component)}. If the
* underlying layout is a {@link CustomLayout} the field is added to the
* CustomLayout location given by the string representation of the property
* id using {@link CustomLayout#addComponent(Component, String)}.
* </p>
*
* <p>
* Override this method to control how the fields are added to the layout.
* </p>
*
* @param propertyId
* @param field
*/
protected void attachField(Object propertyId, Field field) {
if (propertyId == null || field == null) {
return;
}
Layout layout = getLayout();
if (layout instanceof CustomLayout) {
((CustomLayout) layout).addComponent(field, propertyId.toString());
} else {
layout.addComponent(field);
}
} | 3.68 |
hadoop_WeightedPolicyInfo_setHeadroomAlpha | /**
* Set the parameter headroomAlpha, used by policies that balance weight-based
* and load-based considerations in their decisions.
*
* For policies that use this parameter, values close to 1 indicate that most
* of the decision should be based on currently observed headroom from various
* sub-clusters, values close to zero, indicate that the decision should be
* mostly based on weights and practically ignore current load.
*
* @param headroomAlpha the value to use for balancing.
*/
public void setHeadroomAlpha(float headroomAlpha) {
this.headroomAlpha = headroomAlpha;
} | 3.68 |
framework_StateChangeEvent_addJsonFields | /**
* Recursively adds the names of all fields in all objects in the provided
* json object.
*
* @param json
* the json object to process
* @param changedProperties
* a set of all currently added fields
* @param context
* the base name of the current object
*/
@Deprecated
private static void addJsonFields(JsonObject json,
FastStringSet changedProperties, String context) {
for (String key : json.keys()) {
String fieldName = context + key;
changedProperties.add(fieldName);
JsonObject object = json.get(key);
if (object != null) {
addJsonFields(object, changedProperties, fieldName + ".");
}
}
} | 3.68 |
flink_PatternStream_inProcessingTime | /** Sets the time characteristic to processing time. */
public PatternStream<T> inProcessingTime() {
return new PatternStream<>(builder.inProcessingTime());
} | 3.68 |
morf_AbstractSqlDialectTest_expectedHints6 | /**
* @return The expected SQL for the {@link SelectStatement#withParallelQueryPlan(int)} directive.
*/
protected String expectedHints6() {
return "SELECT a, b FROM " + tableName("Foo") + " ORDER BY a";
} | 3.68 |
flink_WindowedStream_minBy | /**
* Applies an aggregation that gives the minimum element of the pojo data stream by the given
* field expression for every window. A field expression is either the name of a public field or
* a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot
* can be used to drill down into objects, as in {@code "field1.getInnerField2()" }.
*
* @param field The field expression based on which the aggregation will be applied.
* @param first If True then in case of field equality the first object will be returned
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> minBy(String field, boolean first) {
return aggregate(
new ComparableAggregator<>(
field,
input.getType(),
AggregationFunction.AggregationType.MINBY,
first,
input.getExecutionConfig()));
} | 3.68 |
morf_UnionSetOperator_getUnionStrategy | /**
* @return The duplicate row elimination strategy.
* @see UnionStrategy
*/
public UnionStrategy getUnionStrategy() {
return unionStrategy;
} | 3.68 |
morf_TableSetSchema_views | /**
* @see org.alfasoftware.morf.metadata.Schema#views()
*/
@Override
public Collection<View> views() {
return Collections.emptySet();
} | 3.68 |
hbase_TinyLfuBlockCache_asReferencedHeapBlock | /**
* The block cached in TinyLfuBlockCache will always be a heap block: on the one hand, heap
* access is faster than off-heap, so the small index or meta blocks cached in
* CombinedBlockCache benefit a lot. On the other hand, the TinyLfuBlockCache size is always
* calculated based on the total heap size, so caching an off-heap block in TinyLfuBlockCache would
* mess up the heap accounting. Here we clone the block into a heap block if it's an
* off-heap block, otherwise we just use the original block. The key point is to maintain the refCnt of
* the block (HBASE-22127): <br>
* 1. if we cache the cloned heap block, its refCnt is a completely new one, which is easy to handle; <br>
* 2. if we cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's
* reservoir, so once both RPC and TinyLfuBlockCache release the block it can be garbage
* collected by the JVM; hence we need a retain here.
* @param buf the original block
* @return a block backed by heap memory.
*/
private Cacheable asReferencedHeapBlock(Cacheable buf) {
if (buf instanceof HFileBlock) {
HFileBlock blk = ((HFileBlock) buf);
if (blk.isSharedMem()) {
return HFileBlock.deepCloneOnHeap(blk);
}
}
// The block will be referenced by this TinyLfuBlockCache, so should increase its refCnt here.
return buf.retain();
} | 3.68 |
hbase_AbstractMultiFileWriter_abortWriters | /**
* Close all writers without throwing any exceptions. This is typically used when a compaction has failed.
*/
public List<Path> abortWriters() {
List<Path> paths = new ArrayList<>();
for (StoreFileWriter writer : writers()) {
try {
if (writer != null) {
paths.add(writer.getPath());
writer.close();
}
} catch (Exception ex) {
LOG.error("Failed to close the writer after an unfinished compaction.", ex);
}
}
return paths;
} | 3.68 |
framework_VaadinFinderLocatorStrategy_getPathForElement | /**
* {@inheritDoc}
*/
@Override
public String getPathForElement(Element targetElement) {
Element oldTarget = targetElement;
Widget targetWidget = Util.findPaintable(client, targetElement)
.getWidget();
targetElement = targetWidget.getElement();
// Find SubPart name if needed.
String subPart = null;
boolean hasSubParts = targetWidget instanceof SubPartAware;
if (oldTarget != targetElement) {
if (hasSubParts) {
subPart = ((SubPartAware) targetWidget)
.getSubPartName(DOM.asOld(oldTarget));
}
if (!hasSubParts || subPart == null) {
// Couldn't find SubPart name for element.
return null;
}
}
List<ConnectorPath> hierarchy = getConnectorHierarchyForElement(
targetElement);
List<String> path = new ArrayList<>();
// Assemble longname path components back-to-forth with useful
// predicates - first try ID, then caption.
for (int i = 0; i < hierarchy.size(); ++i) {
ConnectorPath cp = hierarchy.get(i);
String pathFragment = cp.name;
String identifier = getPropertyValue(cp.connector, "id");
if (identifier != null) {
pathFragment += "[id=\"" + identifier + "\"]";
} else {
identifier = getPropertyValue(cp.connector, "caption");
if (identifier != null) {
pathFragment += "[caption=\"" + identifier + "\"]";
}
}
path.add(pathFragment);
}
if (path.isEmpty()) {
// If we didn't find a single element, return null..
return null;
}
return getBestSelector(generateQueries(path), targetElement, subPart);
} | 3.68 |
framework_PureGWTTestApplication_hasCommand | /**
* Tests for the existence of a {@link Command} that is the direct child
* of this level of menu.
*
* @param title
* the command's title
* @return true, if this menu level includes a command item with the
* specified title. Otherwise false.
*/
public boolean hasCommand(String title) {
return getCommand(title) != null;
} | 3.68 |
morf_AbstractSelectStatementBuilder_getFields | /**
* Gets the list of fields
*
* @return the fields
*/
List<AliasedField> getFields() {
return fields;
} | 3.68 |
graphhopper_Entity_human | // shared code between reading and writing
private static final String human (long n) {
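// Format a count in human-readable form: millions as "x.xM", thousands as "x.xk", otherwise plain digits.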
if (n >= 1000000) return String.format(Locale.getDefault(), "%.1fM", n/1000000.0);
if (n >= 1000) return String.format(Locale.getDefault(), "%.1fk", n/1000.0);
else return String.format(Locale.getDefault(), "%d", n);
} | 3.68 |
hadoop_RequestFactoryImpl_getServerSideEncryptionAlgorithm | /**
* Get the encryption algorithm of this endpoint.
* @return the encryption algorithm.
*/
@Override
public S3AEncryptionMethods getServerSideEncryptionAlgorithm() {
return encryptionSecrets.getEncryptionMethod();
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getSplits | /**
* Create Hive splits based on CombineFileSplit.
*/
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
init(job);
List<InputSplit> result = new ArrayList<>();
Path[] paths = getInputPaths(job);
List<Path> nonCombinablePaths = new ArrayList<>(paths.length / 2);
List<Path> combinablePaths = new ArrayList<>(paths.length / 2);
int numThreads = Math.min(MAX_CHECK_NONCOMBINABLE_THREAD_NUM,
(int) Math.ceil((double) paths.length / DEFAULT_NUM_PATH_PER_THREAD));
// This check is necessary because for Spark branch, the result array from
// getInputPaths() above could be empty, and therefore numThreads could be 0.
// In that case, Executors.newFixedThreadPool will fail.
if (numThreads > 0) {
try {
Set<Integer> nonCombinablePathIndices = getNonCombinablePathIndices(job, paths, numThreads);
for (int i = 0; i < paths.length; i++) {
if (nonCombinablePathIndices.contains(i)) {
nonCombinablePaths.add(paths[i]);
} else {
combinablePaths.add(paths[i]);
}
}
} catch (Exception e) {
LOG.error("Error checking non-combinable path", e);
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
throw new IOException(e);
}
}
// Store the previous value for the path specification
String oldPaths = job.get(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR);
if (LOG.isDebugEnabled()) {
LOG.debug("The received input paths are: [" + oldPaths + "] against the property "
+ org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR);
}
// Process the normal splits
if (nonCombinablePaths.size() > 0) {
FileInputFormat.setInputPaths(job, nonCombinablePaths.toArray(new Path[0]));
InputSplit[] splits = super.getSplits(job, numSplits);
Collections.addAll(result, splits);
}
// Process the combine splits
if (combinablePaths.size() > 0) {
FileInputFormat.setInputPaths(job, combinablePaths.toArray(new Path[0]));
Map<Path, PartitionDesc> pathToPartitionInfo = this.pathToPartitionInfo != null ? this.pathToPartitionInfo
: Utilities.getMapWork(job).getPathToPartitionInfo();
InputSplit[] splits = getCombineSplits(job, numSplits, pathToPartitionInfo);
Collections.addAll(result, splits);
}
// Restore the old path information back
// This is just to prevent incompatibilities with previous versions Hive
// if some application depends on the original value being set.
if (oldPaths != null) {
job.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, oldPaths);
}
// clear work from ThreadLocal after splits generated in case of thread is reused in pool.
Utilities.clearWorkMapForConf(job);
LOG.info("Number of all splits " + result.size());
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
return result.toArray(new InputSplit[result.size()]);
} | 3.68 |
hbase_HFileBlockIndex_getRootSize | /** Returns the size of this chunk if stored in the root index block format */
@Override
public int getRootSize() {
return curTotalRootSize;
} | 3.68 |
querydsl_JTSGeometryExpressions_translate | /**
* Translates the geometry to a new location using the numeric parameters as offsets.
*
* @param expr geometry
* @param deltax x offset
* @param deltay y offset
* @param deltaz z offset
* @param <T>
* @return geometry
*/
public static <T extends Geometry> JTSGeometryExpression<T> translate(Expression<T> expr, float deltax, float deltay, float deltaz) {
return geometryOperation(expr.getType(), SpatialOps.TRANSLATE2,
expr, ConstantImpl.create(deltax), ConstantImpl.create(deltay), ConstantImpl.create(deltaz));
} | 3.68 |
morf_SqlDialect_getDataTypeRepresentation | /**
* Gets the column representation for the datatype, etc.
*
* @param dataType the column datatype.
* @param width the column width.
* @param scale the column scale.
* @return a string representation of the column definition.
*/
protected String getDataTypeRepresentation(DataType dataType, int width, int scale) {
return getColumnRepresentation(dataType, width, scale);
} | 3.68 |
hbase_SingleColumnValueFilter_setLatestVersionOnly | /**
* Set whether only the latest version of the column value should be compared. If true, the row
* will be returned if only the latest version of the column value matches. If false, the row will
* be returned if any version of the column value matches. The default is true.
* @param latestVersionOnly flag
*/
public void setLatestVersionOnly(boolean latestVersionOnly) {
this.latestVersionOnly = latestVersionOnly;
} | 3.68 |
morf_SqlDialect_getOperatorLine | /**
* Convert a criterion into a string expression of the form
* "[operand] [operator] [operand]".
*
* @param criterion the criterion to convert
* @param operator the operator to use in the expression
* @return a string representation of the criterion
*/
protected String getOperatorLine(Criterion criterion, String operator) {
return getSqlFrom(criterion.getField()) + " " + operator + " " + getSqlForCriterionValue(criterion.getValue());
} | 3.68 |
flink_ExpandColumnFunctionsRule_isNameRangeCall | /** Whether the expression is a column name range expression, e.g. withColumns(a ~ b). */
private boolean isNameRangeCall(UnresolvedCallExpression expression) {
return expression.getFunctionDefinition() == RANGE_TO
&& expression.getChildren().get(0) instanceof UnresolvedReferenceExpression
&& expression.getChildren().get(1) instanceof UnresolvedReferenceExpression;
} | 3.68 |
framework_Tree_writeItem | /**
* Recursively writes a data source Item and its children to a design.
*
* @since 7.5.0
* @param design
* the element into which to insert the item
* @param itemId
* the id of the item to write
* @param context
* the DesignContext instance used in writing
* @return
*/
@Override
protected Element writeItem(Element design, Object itemId,
DesignContext context) {
Element element = design.appendElement("node");
element.attr("text", itemId.toString());
Resource icon = getItemIcon(itemId);
if (icon != null) {
DesignAttributeHandler.writeAttribute("icon", element.attributes(),
icon, null, Resource.class, context);
}
if (isSelected(itemId)) {
element.attr("selected", "");
}
Collection<?> children = getChildren(itemId);
if (children != null) {
// Yeah... see #5864
for (Object childItemId : children) {
writeItem(element, childItemId, context);
}
}
return element;
} | 3.68 |
hadoop_FederationStateStoreFacade_getMasterKeyByDelegationKey | /**
* The Router Supports GetMasterKeyByDelegationKey.
*
* @param newKey Key used for generating and verifying delegation tokens
* @throws YarnException if the call to the state store is unsuccessful
* @throws IOException An IO Error occurred
* @return RouterMasterKeyResponse
*/
public RouterMasterKeyResponse getMasterKeyByDelegationKey(DelegationKey newKey)
throws YarnException, IOException {
LOG.info("Storing master key with keyID {}.", newKey.getKeyId());
ByteBuffer keyBytes = ByteBuffer.wrap(newKey.getEncodedKey());
RouterMasterKey masterKey = RouterMasterKey.newInstance(newKey.getKeyId(),
keyBytes, newKey.getExpiryDate());
RouterMasterKeyRequest keyRequest = RouterMasterKeyRequest.newInstance(masterKey);
return stateStore.getMasterKeyByDelegationKey(keyRequest);
} | 3.68 |
open-banking-gateway_DatasafeDataStorage_read | /**
* Reads encrypted database entry
* @param path Path to the entry
*/
@Override
public Optional<byte[]> read(String path) {
return txOper.execute(callback -> find.apply(path).map(getData));
} | 3.68 |
hbase_CompactionLifeCycleTracker_notExecuted | /**
* Called if the compaction request has failed for some reason.
*/
default void notExecuted(Store store, String reason) {
} | 3.68 |
hudi_HiveSchemaUtils_getFieldNames | /**
* Get field names from field schemas.
*/
public static List<String> getFieldNames(List<FieldSchema> fieldSchemas) {
return fieldSchemas.stream().map(FieldSchema::getName).collect(Collectors.toList());
} | 3.68 |
hadoop_FederationRegistryClient_writeAMRMTokenForUAM | /**
* Write/update the UAM token for an application and a sub-cluster.
*
* @param appId ApplicationId.
* @param subClusterId sub-cluster id of the token
* @param token the UAM of the application
* @return whether the amrmToken is added or updated to a new value
*/
public synchronized boolean writeAMRMTokenForUAM(ApplicationId appId,
String subClusterId, Token<AMRMTokenIdentifier> token) {
Map<String, Token<AMRMTokenIdentifier>> subClusterTokenMap =
this.appSubClusterTokenMap.get(appId);
if (subClusterTokenMap == null) {
subClusterTokenMap = new ConcurrentHashMap<>();
this.appSubClusterTokenMap.put(appId, subClusterTokenMap);
}
boolean update = !token.equals(subClusterTokenMap.get(subClusterId));
if (!update) {
LOG.debug("Same amrmToken received from {}, skip writing registry for {}",
subClusterId, appId);
return update;
}
LOG.info("Writing/Updating amrmToken for {} to registry for {}",
subClusterId, appId);
try {
// First, write the token entry
writeRegistry(this.registry, this.user,
getRegistryKey(appId, subClusterId), token.encodeToUrlString(), true);
// Then update the subClusterTokenMap
subClusterTokenMap.put(subClusterId, token);
} catch (YarnException | IOException e) {
LOG.error("Failed writing AMRMToken to registry for subcluster {}.", subClusterId, e);
}
return update;
} | 3.68 |
hadoop_AbfsStatistic_getHttpCall | /**
* Getter for http call for HTTP duration trackers.
*
* @return http call of a statistic.
*/
public String getHttpCall() {
return httpCall;
} | 3.68 |
hbase_ByteBufferIOEngine_isPersistent | /**
* The memory IO engine is never able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
return false;
} | 3.68 |
hbase_TableSchemaModel___setReadOnly | /**
* @param value desired value of READONLY attribute
*/
public void __setReadOnly(boolean value) {
attrs.put(READONLY, Boolean.toString(value));
} | 3.68 |
zxing_SymbolInfo_overrideSymbolSet | /**
* Overrides the symbol info set used by this class. Used for testing purposes.
*
* @param override the symbol info set to use
*/
public static void overrideSymbolSet(SymbolInfo[] override) {
symbols = override;
} | 3.68 |
hudi_StreamerUtil_tableExists | /**
* Returns whether the hoodie table exists under given path {@code basePath}.
*/
public static boolean tableExists(String basePath, org.apache.hadoop.conf.Configuration hadoopConf) {
// Hadoop FileSystem
FileSystem fs = FSUtils.getFs(basePath, hadoopConf);
try {
return fs.exists(new Path(basePath, HoodieTableMetaClient.METAFOLDER_NAME))
&& fs.exists(new Path(new Path(basePath, HoodieTableMetaClient.METAFOLDER_NAME), HoodieTableConfig.HOODIE_PROPERTIES_FILE));
} catch (IOException e) {
throw new HoodieException("Error while checking whether table exists under path:" + basePath, e);
}
} | 3.68 |
flink_DistributedRandomSampler_sampleInCoordinator | /**
* Sample algorithm for the second phase. This operation should be executed as the UDF of an all
* reduce operation.
*
* @param input The intermediate sample output generated in the first phase.
* @return The sampled output.
*/
public Iterator<T> sampleInCoordinator(Iterator<IntermediateSampleData<T>> input) {
if (numSamples == 0) {
return emptyIterable;
}
// This queue holds fixed number elements with the top K weight for the coordinator.
PriorityQueue<IntermediateSampleData<T>> reservoir =
new PriorityQueue<IntermediateSampleData<T>>(numSamples);
int index = 0;
IntermediateSampleData<T> smallest = null;
while (input.hasNext()) {
IntermediateSampleData<T> element = input.next();
if (index < numSamples) {
// Fill the queue with first K elements from input.
reservoir.add(element);
smallest = reservoir.peek();
} else {
// If current element weight is larger than the smallest one in queue, remove the
// element
// with the smallest weight, and append current element into the queue.
if (element.getWeight() > smallest.getWeight()) {
reservoir.remove();
reservoir.add(element);
smallest = reservoir.peek();
}
}
index++;
}
final Iterator<IntermediateSampleData<T>> itr = reservoir.iterator();
return new Iterator<T>() {
@Override
public boolean hasNext() {
return itr.hasNext();
}
@Override
public T next() {
return itr.next().getElement();
}
@Override
public void remove() {
itr.remove();
}
};
} | 3.68 |
hmily_SpringCloudXaAutoConfiguration_hmilyXaInterceptor | /**
* Register {@link FeignRequestInterceptor} Bean.
* @return {@link FeignRequestInterceptor} Bean
*/
@Bean
public RequestInterceptor hmilyXaInterceptor() {
return new FeignRequestInterceptor();
} | 3.68 |
dubbo_RpcStatus_getTotalElapsed | /**
* get total elapsed.
*
* @return total elapsed
*/
public long getTotalElapsed() {
return totalElapsed.get();
} | 3.68 |
framework_Window_setPositionX | /**
* Sets the distance of the Window's left border in pixels from the left border of the
* containing (main) window. Has effect only if in {@link WindowMode#NORMAL}
* mode.
*
* @param positionX
* the distance of the Window's left border in pixels from the left border
* of the containing (main) window, or -1 if unspecified.
* @since 4.0.0
*/
public void setPositionX(int positionX) {
getState().positionX = positionX;
getState().centered = false;
} | 3.68 |
flink_BinaryKVInMemorySortBuffer_getIterator | /**
* Gets an iterator over all KV records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
public final MutableObjectIterator<Tuple2<BinaryRowData, BinaryRowData>> getIterator() {
return new MutableObjectIterator<Tuple2<BinaryRowData, BinaryRowData>>() {
private final int size = size();
private int current = 0;
private int currentSegment = 0;
private int currentOffset = 0;
private MemorySegment currentIndexSegment = sortIndex.get(0);
@Override
public Tuple2<BinaryRowData, BinaryRowData> next(
Tuple2<BinaryRowData, BinaryRowData> kv) {
if (this.current < this.size) {
this.current++;
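// Advance to the next index segment once the current one has been fully consumed.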
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset);
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(kv.f0, kv.f1, pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
@Override
public Tuple2<BinaryRowData, BinaryRowData> next() {
throw new RuntimeException("Not support!");
}
};
} | 3.68 |
morf_AbstractSqlDialectTest_expectedUpper | /**
* @return The expected SQL for the UPPER function.
*/
protected String expectedUpper() {
return "SELECT UPPER(field1) FROM " + tableName("schedule");
} | 3.68 |
flink_BuildSideIterator_moveToNextBucket | /**
* Move to the next bucket; return true when moving to an on-heap bucket, and false when moving
* to a spilled bucket or when there are no more buckets.
*/
private boolean moveToNextBucket() {
scanCount++;
if (scanCount >= area.numBuckets) {
return false;
}
// move to next bucket, update all the current bucket status with new bucket
// information.
final int bucketArrayPos = scanCount >> area.table.bucketsPerSegmentBits;
final int currentBucketInSegmentOffset =
(scanCount & area.table.bucketsPerSegmentMask)
<< BinaryHashBucketArea.BUCKET_SIZE_BITS;
MemorySegment currentBucket = area.buckets[bucketArrayPos];
setBucket(currentBucket, area.overflowSegments, currentBucketInSegmentOffset);
return true;
} | 3.68 |
hbase_SpaceQuotaSnapshot_getNoSuchSnapshot | /**
* Returns a singleton that corresponds to no snapshot information.
*/
public static SpaceQuotaSnapshot getNoSuchSnapshot() {
return NO_SUCH_SNAPSHOT;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_computeSnapshotSizes | /**
* Computes the size of each snapshot provided given the current files referenced by the table.
* @param snapshotsToComputeSize The snapshots to compute the size of
* @return A mapping of table to snapshot created from that table and the snapshot's size.
*/
Map<String, Long> computeSnapshotSizes(Multimap<TableName, String> snapshotsToComputeSize)
throws IOException {
final Map<String, Long> snapshotSizesByNamespace = new HashMap<>();
final long start = System.nanoTime();
for (Entry<TableName, Collection<String>> entry : snapshotsToComputeSize.asMap().entrySet()) {
final TableName tn = entry.getKey();
final Collection<String> snapshotNames = entry.getValue();
// Get our notifier instance, this is tracking archivals that happen out-of-band of this chore
FileArchiverNotifier notifier = getNotifierForTable(tn);
// The total size consumed by all snapshots against this table
long totalSnapshotSize = notifier.computeAndStoreSnapshotSizes(snapshotNames);
// Bucket that size into the appropriate namespace
snapshotSizesByNamespace.merge(tn.getNamespaceAsString(), totalSnapshotSize, Long::sum);
}
// Update the amount of time it took to compute the size of the snapshots for a table
if (metrics != null) {
metrics.incrementSnapshotSizeComputationTime((System.nanoTime() - start) / 1_000_000);
}
return snapshotSizesByNamespace;
} | 3.68 |
dubbo_Version_getFromFile | /**
* get version from file: path/to/group-module-x.y.z.jar, returns x.y.z
*/
private static String getFromFile(String file) {
// remove suffix ".jar": "path/to/group-module-x.y.z"
file = file.substring(0, file.length() - 4);
// remove path: "group-module-x.y.z"
int i = file.lastIndexOf('/');
if (i >= 0) {
file = file.substring(i + 1);
}
// remove group: "module-x.y.z"
i = file.indexOf("-");
if (i >= 0) {
file = file.substring(i + 1);
}
// remove module: "x.y.z"
while (file.length() > 0 && !Character.isDigit(file.charAt(0))) {
i = file.indexOf("-");
if (i >= 0) {
file = file.substring(i + 1);
} else {
break;
}
}
return file;
} | 3.68 |
hadoop_AuditReplayCommand_isPoison | /**
* If true, the thread which consumes this item should not process any further
* items and instead simply terminate itself.
*/
boolean isPoison() {
return false;
} | 3.68 |
hadoop_OBSListing_createFileStatusListingIterator | /**
* Create a FileStatus iterator against a path, with a given list object
* request.
*
* @param listPath path of the listing
* @param request initial request to make
* @param filter the filter on which paths to accept
* @param acceptor the class/predicate to decide which entries to accept in
* the listing based on the full file status.
* @return the iterator
* @throws IOException IO Problems
*/
FileStatusListingIterator createFileStatusListingIterator(
final Path listPath,
final ListObjectsRequest request,
final PathFilter filter,
final FileStatusAcceptor acceptor)
throws IOException {
return new FileStatusListingIterator(
new ObjectListingIterator(listPath, request), filter, acceptor);
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_listAllPartitionsFromMDT | /**
* Function to find hoodie partitions and list files in them in parallel from MDT.
*
* @param initializationTime Files which have a timestamp after this are neglected
* @return List consisting of {@code DirectoryInfo} for each partition found.
*/
private List<DirectoryInfo> listAllPartitionsFromMDT(String initializationTime) throws IOException {
List<DirectoryInfo> dirinfoList = new LinkedList<>();
List<String> allPartitionPaths = metadata.getAllPartitionPaths().stream()
.map(partitionPath -> dataWriteConfig.getBasePath() + "/" + partitionPath).collect(Collectors.toList());
Map<String, FileStatus[]> partitionFileMap = metadata.getAllFilesInPartitions(allPartitionPaths);
for (Map.Entry<String, FileStatus[]> entry : partitionFileMap.entrySet()) {
dirinfoList.add(new DirectoryInfo(entry.getKey(), entry.getValue(), initializationTime));
}
return dirinfoList;
} | 3.68 |
hbase_SnapshotManager_cleanupSentinels | /**
* Remove the sentinels that are marked as finished and the completion time has exceeded the
* removal timeout.
* @param sentinels map of sentinels to clean
*/
private synchronized void cleanupSentinels(final Map<TableName, SnapshotSentinel> sentinels) {
long currentTime = EnvironmentEdgeManager.currentTime();
long sentinelsCleanupTimeoutMillis =
master.getConfiguration().getLong(HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS,
SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLS_DEFAULT);
Iterator<Map.Entry<TableName, SnapshotSentinel>> it = sentinels.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<TableName, SnapshotSentinel> entry = it.next();
SnapshotSentinel sentinel = entry.getValue();
if (
sentinel.isFinished()
&& (currentTime - sentinel.getCompletionTimestamp()) > sentinelsCleanupTimeoutMillis
) {
it.remove();
}
}
} | 3.68 |
flink_RestoredCheckpointStats_getCheckpointId | /**
* Returns the ID of this checkpoint.
*
* @return ID of this checkpoint.
*/
public long getCheckpointId() {
return checkpointId;
} | 3.68 |
hbase_MetricsSource_getTimeStampNextToReplicate | /**
* TimeStamp of next edit to be replicated.
* @return timeStampNextToReplicate - TimeStamp of next edit to be replicated.
*/
public long getTimeStampNextToReplicate() {
return timeStampNextToReplicate;
} | 3.68 |
flink_FileSystem_create | /**
* Opens an FSDataOutputStream at the indicated Path.
*
* @param f the file name to open
* @param overwrite if a file with this name already exists, then if true, the file will be
* overwritten, and if false an error will be thrown.
* @throws IOException Thrown, if the stream could not be opened because of an I/O, or because a
* file already exists at that path and the write mode indicates to not overwrite the file.
* @deprecated Use {@link #create(Path, WriteMode)} instead.
*/
@Deprecated
public FSDataOutputStream create(Path f, boolean overwrite) throws IOException {
return create(f, overwrite ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE);
} | 3.68 |
hbase_ZooKeeperHelper_ensureConnectedZooKeeper | /**
* Ensure passed zookeeper is connected.
* @param timeout Time to wait on established Connection
*/
public static ZooKeeper ensureConnectedZooKeeper(ZooKeeper zookeeper, int timeout)
throws ZooKeeperConnectionException {
if (zookeeper.getState().isConnected()) {
return zookeeper;
}
Stopwatch stopWatch = Stopwatch.createStarted();
// Make sure we are connected before we hand it back.
while (!zookeeper.getState().isConnected()) {
Threads.sleep(1);
if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) {
throw new ZooKeeperConnectionException("Failed connect after waiting "
+ stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + zookeeper);
}
}
return zookeeper;
} | 3.68 |
flink_SqlLikeUtils_sqlToRegexLike | /** Translates a SQL LIKE pattern to Java regex pattern. */
static String sqlToRegexLike(String sqlPattern, char escapeChar) {
int i;
final int len = sqlPattern.length();
final StringBuilder javaPattern = new StringBuilder(len + len);
for (i = 0; i < len; i++) {
char c = sqlPattern.charAt(i);
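// Escape characters that are special in Java regular expressions.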
if (JAVA_REGEX_SPECIALS.indexOf(c) >= 0) {
javaPattern.append('\\');
}
if (c == escapeChar) {
if (i == (sqlPattern.length() - 1)) {
throw invalidEscapeSequence(sqlPattern, i);
}
char nextChar = sqlPattern.charAt(i + 1);
if ((nextChar == '_') || (nextChar == '%') || (nextChar == escapeChar)) {
javaPattern.append(nextChar);
i++;
} else {
throw invalidEscapeSequence(sqlPattern, i);
}
} else if (c == '_') {
javaPattern.append('.');
} else if (c == '%') {
javaPattern.append("(?s:.*)");
} else {
javaPattern.append(c);
}
}
return javaPattern.toString();
} | 3.68 |
framework_VButton_onClick | /**
* Called internally when the user finishes clicking on this button. The
* default behavior is to fire the click event to listeners. Subclasses that
* override {@link #onClickStart()} should override this method to restore
* the normal widget display.
* <p>
* To add custom code for a click event, override
* {@link #onClick(ClickEvent)} instead of this.
* <p>
* For internal use only. May be removed or replaced in the future.
*/
public void onClick() {
// Allow the click we're about to synthesize to pass through to the
// superclass and containing elements. Element.dispatchEvent() is
// synchronous, so we simply set and clear the flag within this method.
disallowNextClick = false;
// Screen coordinates are not always available (e.g., when the click is
// caused by a keyboard event).
// Set (x,y) client coordinates to the middle of the button
int x = getElement().getAbsoluteLeft() - getElement().getScrollLeft()
- getElement().getOwnerDocument().getScrollLeft()
+ WidgetUtil.getRequiredWidth(getElement()) / 2;
int y = getElement().getAbsoluteTop() - getElement().getScrollTop()
- getElement().getOwnerDocument().getScrollTop()
+ WidgetUtil.getRequiredHeight(getElement()) / 2;
NativeEvent evt = Document.get().createClickEvent(1, 0, 0, x, y, false,
false, false, false);
getElement().dispatchEvent(evt);
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getLength | /**
* Returns the length of the i<sup>th</sup> Path.
*/
@Override
public long getLength(int i) {
return inputSplitShim.getLength(i);
} | 3.68 |
hbase_MasterCoprocessorHost_postCompletedSplitRegionAction | /**
* Invoked just after a split
* @param regionInfoA the new left-hand daughter region
* @param regionInfoB the new right-hand daughter region
* @param user the user
*/
public void postCompletedSplitRegionAction(final RegionInfo regionInfoA,
final RegionInfo regionInfoB, final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postCompletedSplitRegionAction(this, regionInfoA, regionInfoB);
}
});
} | 3.68 |
hbase_AsyncTableResultScanner_isSuspended | // used in tests to test whether the scanner has been suspended
synchronized boolean isSuspended() {
return resumer != null;
} | 3.68 |
hadoop_DockerCommandExecutor_isKillable | /**
* Is the container in a killable state?
*
* @param containerStatus the container's {@link DockerContainerStatus}.
* @return is the container in a killable state.
*/
public static boolean isKillable(DockerContainerStatus containerStatus) {
return isStoppable(containerStatus);
} | 3.68 |
hadoop_AbfsTokenRenewer_renew | /**
* Renew the delegation token.
*
* @param token token to renew.
* @param conf configuration object.
* @return extended expiry time of the token.
* @throws IOException thrown when trying get current user.
* @throws InterruptedException thrown when thread is interrupted
*/
@Override
public long renew(final Token<?> token, Configuration conf)
throws IOException, InterruptedException {
LOG.debug("Renewing the delegation token");
return getInstance(conf).renewDelegationToken(token);
} | 3.68 |
dubbo_Stack_get | /**
* get.
*
* @param index index.
* @return element.
*/
public E get(int index) {
if (index >= mSize || index + mSize < 0) {
throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + mSize);
}
return index < 0 ? mElements.get(index + mSize) : mElements.get(index);
} | 3.68 |
hbase_NettyUnsafeUtils_closeImmediately | /**
* Directly closes the channel, setting SO_LINGER to 0 and skipping any handlers in the pipeline.
* This is useful for cases where it's important to immediately close without any delay.
* Otherwise, pipeline handlers and even general TCP flows can cause a normal close to take
* upwards of a few seconds or more. This will likely cause the client side to see either a
* "Connection reset by peer" or unexpected ConnectionClosedException.
* <p>
* <b>It's necessary to call this from within the channel's eventLoop!</b>
*/
public static void closeImmediately(Channel channel) {
assert channel.eventLoop().inEventLoop();
channel.config().setOption(ChannelOption.SO_LINGER, 0);
channel.unsafe().close(channel.voidPromise());
} | 3.68 |