name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68)
---|---|---|
flink_Tuple23_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22), where the individual
* fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ","
+ StringUtils.arrayAwareToString(this.f13)
+ ","
+ StringUtils.arrayAwareToString(this.f14)
+ ","
+ StringUtils.arrayAwareToString(this.f15)
+ ","
+ StringUtils.arrayAwareToString(this.f16)
+ ","
+ StringUtils.arrayAwareToString(this.f17)
+ ","
+ StringUtils.arrayAwareToString(this.f18)
+ ","
+ StringUtils.arrayAwareToString(this.f19)
+ ","
+ StringUtils.arrayAwareToString(this.f20)
+ ","
+ StringUtils.arrayAwareToString(this.f21)
+ ","
+ StringUtils.arrayAwareToString(this.f22)
+ ")";
} | 3.68 |
dubbo_Help_mainHelp | /*
* output main help
*/
private String mainHelp() {
final TTable tTable = new TTable(new TTable.ColumnDefine[] {
new TTable.ColumnDefine(TTable.Align.RIGHT), new TTable.ColumnDefine(80, false, TTable.Align.LEFT)
});
final List<Class<?>> classes = commandHelper.getAllCommandClass();
Collections.sort(classes, new Comparator<Class<?>>() {
@Override
public int compare(Class<?> o1, Class<?> o2) {
final Integer o1s = o1.getAnnotation(Cmd.class).sort();
final Integer o2s = o2.getAnnotation(Cmd.class).sort();
return o1s.compareTo(o2s);
}
});
for (Class<?> clazz : classes) {
if (clazz.isAnnotationPresent(Cmd.class)) {
final Cmd cmd = clazz.getAnnotation(Cmd.class);
tTable.addRow(cmd.name(), cmd.summary());
}
}
return tTable.padding(1).rendering();
} | 3.68 |
querydsl_GeometryExpressions_lineStringOperation | /**
* Create a new LineString operation expression
*
* @param op operator
* @param args arguments
* @return operation expression
*/
public static LineStringExpression<LineString> lineStringOperation(Operator op, Expression<?>... args) {
return new LineStringOperation<LineString>(LineString.class, op, args);
} | 3.68 |
flink_BlockStatementSplitter_extractBlocks | /**
* This method extracts statements from IF, ELSE and WHILE blocks of the code block used during
* initialization of this object. Every entry of the returned map can be seen as a new method name
* (map key) and method body (map value). The block names will be prefixed with the provided
* context.
*
* @return a map of block-name-to-block-statements mappings. The key can be interpreted as the name
* of the extracted block/method and the corresponding List holds the individual statements
* (block's lines) for this block.
*/
public Map<String, List<String>> extractBlocks() {
Map<String, List<String>> allBlocks =
CollectionUtil.newHashMapWithExpectedSize(visitor.blocks.size());
for (Entry<String, List<ParserRuleContext>> entry : visitor.blocks.entrySet()) {
List<String> blocks =
entry.getValue().stream()
.map(CodeSplitUtil::getContextString)
.collect(Collectors.toList());
allBlocks.put(entry.getKey(), blocks);
}
return allBlocks;
} | 3.68 |
framework_RpcDataProviderExtension_columnsRemoved | /**
* Informs this data provider that given columns have been removed from
* grid.
*
* @param removedColumns
* a list of removed columns
*/
public void columnsRemoved(List<Column> removedColumns) {
for (GridValueChangeListener l : activeItemHandler
.getValueChangeListeners()) {
l.removeColumns(removedColumns);
}
// No need to resend unchanged data. Client will remember the old
// columns until next set of rows is sent.
} | 3.68 |
hbase_HttpServer_getOrEmptyString | /**
* Extracts the value for the given key from the configuration, or returns a string of zero length.
*/
private String getOrEmptyString(Configuration conf, String key) {
if (null == key) {
return EMPTY_STRING;
}
final String value = conf.get(key.trim());
return null == value ? EMPTY_STRING : value;
} | 3.68 |
hbase_HRegion_throwException | //// method for debugging tests
void throwException(String title, String regionName) {
StringBuilder buf = new StringBuilder();
buf.append(title + ", ");
buf.append(getRegionInfo().toString());
buf.append(getRegionInfo().isMetaRegion() ? " meta region " : " ");
buf.append("stores: ");
for (HStore s : stores.values()) {
buf.append(s.getColumnFamilyDescriptor().getNameAsString());
buf.append(" size: ");
buf.append(s.getMemStoreSize().getDataSize());
buf.append(" ");
}
buf.append("end-of-stores");
buf.append(", memstore size ");
buf.append(getMemStoreDataSize());
if (getRegionInfo().getRegionNameAsString().startsWith(regionName)) {
throw new RuntimeException(buf.toString());
}
} | 3.68 |
hudi_OptionsResolver_isDefaultHoodieRecordPayloadClazz | /**
* Returns whether the payload clazz is {@link DefaultHoodieRecordPayload}.
*/
public static boolean isDefaultHoodieRecordPayloadClazz(Configuration conf) {
return conf.getString(FlinkOptions.PAYLOAD_CLASS_NAME).contains(DefaultHoodieRecordPayload.class.getSimpleName());
} | 3.68 |
hudi_BloomFilterUtils_getNumHashes | /**
* @return the number of hashes given the bitsize and total number of entries.
*/
static int getNumHashes(int bitSize, int numEntries) {
// Number of the hash functions
return (int) Math.ceil(Math.log(2) * bitSize / numEntries);
} | 3.68 |
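The formula in getNumHashes above is the standard optimal-hash-count expression for a Bloom filter, k = ceil(ln 2 * bitSize / numEntries). A quick standalone check of the arithmetic in plain Java (independent of the Hudi class, no external dependencies assumed):

```java
public class BloomHashCountDemo {
    // Mirrors the getNumHashes formula: k = ceil(ln(2) * bitSize / numEntries)
    static int numHashes(int bitSize, int numEntries) {
        return (int) Math.ceil(Math.log(2) * bitSize / numEntries);
    }

    public static void main(String[] args) {
        // 10 bits per entry: ln(2) * 10 ≈ 6.93, so 7 hash functions
        System.out.println(numHashes(10_000, 1_000)); // 7
        // 8 bits per entry: ln(2) * 8 ≈ 5.55, so 6 hash functions
        System.out.println(numHashes(8_000, 1_000));  // 6
    }
}
```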
flink_TaskExecutorMemoryConfiguration_getTaskHeap | /** Returns the configured heap size used by the tasks. */
public Long getTaskHeap() {
return taskHeap;
} | 3.68 |
hbase_StripeStoreFileManager_markCompactedAway | // Mark the files as compactedAway once the storefiles and compactedfiles list is finalised
// Let a background thread close the actual reader on these compacted files and also
// ensure to evict the blocks from block cache so that they are no longer in
// cache
private void markCompactedAway(Collection<HStoreFile> compactedFiles) {
for (HStoreFile file : compactedFiles) {
file.markCompactedAway();
}
} | 3.68 |
hudi_SanitizationUtils_transformList | /**
* Parse list for sanitizing
* @param src - deserialized schema
* @param invalidCharMask - mask to replace invalid characters with
*/
private static List<Object> transformList(List<Object> src, String invalidCharMask) {
return src.stream().map(obj -> {
if (obj instanceof List) {
return transformList((List<Object>) obj, invalidCharMask);
} else if (obj instanceof Map) {
return transformMap((Map<String, Object>) obj, invalidCharMask);
} else {
return obj;
}
}).collect(Collectors.toList());
} | 3.68 |
hbase_FSDataInputStreamWrapper_unbuffer | /**
* This will free sockets and file descriptors held by the stream only when the stream implements
* org.apache.hadoop.fs.CanUnbuffer. NOT THREAD SAFE. Must be called only when all the clients
* using this stream to read the blocks have finished reading. If by chance the stream is
* unbuffered and there are clients still holding this stream for read then on next client read
* request a new socket will be opened by Datanode without client knowing about it and will serve
* its read request. Note: If this socket is idle for some time then the DataNode will close the
* socket and the socket will move into CLOSE_WAIT state and on the next client request on this
* stream, the current socket will be closed and a new socket will be opened to serve the
* requests.
*/
@SuppressWarnings({ "rawtypes" })
public void unbuffer() {
FSDataInputStream stream = this.getStream(this.shouldUseHBaseChecksum());
if (stream != null) {
InputStream wrappedStream = stream.getWrappedStream();
// CanUnbuffer interface was added as part of HDFS-7694 and the fix is available in Hadoop
// 2.6.4+ and 2.7.1+ versions only so check whether the stream object implements the
// CanUnbuffer interface or not and based on that call the unbuffer api.
final Class<? extends InputStream> streamClass = wrappedStream.getClass();
if (this.instanceOfCanUnbuffer == null) {
// To ensure we compute whether the stream is instance of CanUnbuffer only once.
this.instanceOfCanUnbuffer = false;
if (wrappedStream instanceof CanUnbuffer) {
this.unbuffer = (CanUnbuffer) wrappedStream;
this.instanceOfCanUnbuffer = true;
}
}
if (this.instanceOfCanUnbuffer) {
try {
this.unbuffer.unbuffer();
} catch (UnsupportedOperationException e) {
if (isLogTraceEnabled) {
LOG.trace("Failed to invoke 'unbuffer' method in class " + streamClass
+ " . So there may be the stream does not support unbuffering.", e);
}
}
} else {
if (isLogTraceEnabled) {
LOG.trace("Failed to find 'unbuffer' method in class " + streamClass);
}
}
}
} | 3.68 |
hadoop_Interns_tag | /**
* Get a metrics tag.
* @param name of the tag
* @param description of the tag
* @param value of the tag
* @return an interned metrics tag
*/
public static MetricsTag tag(String name, String description, String value) {
return Tags.INSTANCE.cache.add(info(name, description), value);
} | 3.68 |
pulsar_WebSocketWebResource_validateUserAccess | /**
* Checks if user has super-user access or user is authorized to produce/consume on a given topic.
*
* @param topic
* @throws RestException
*/
protected void validateUserAccess(TopicName topic) {
boolean isAuthorized = false;
try {
validateSuperUserAccess();
isAuthorized = true;
} catch (Exception e) {
try {
isAuthorized = isAuthorized(topic);
} catch (Exception ne) {
throw new RestException(ne);
}
}
if (!isAuthorized) {
throw new RestException(Status.UNAUTHORIZED, "Don't have permission to access this topic");
}
} | 3.68 |
hadoop_RouterStateIdContext_receiveRequestState | /**
* Routers do not update their state using information from clients
* to avoid clients interfering with one another.
*/
@Override
public long receiveRequestState(RpcRequestHeaderProto header,
long clientWaitTime) throws RetriableException {
// Do nothing.
return 0;
} | 3.68 |
framework_AbstractEmbedded_setSource | /**
* Sets the object source resource. The dimensions are assumed if possible.
* The type is guessed from the resource.
*
* @param source
* the source to set.
*/
public void setSource(Resource source) {
setResource(AbstractEmbeddedState.SOURCE_RESOURCE, source);
} | 3.68 |
hbase_Result_isCursor | /**
* Return true if this Result is a cursor to tell users where the server has scanned. In this
* Result the only meaningful method is {@link #getCursor()}.
* {@code
* while (r = scanner.next() && r != null) {
*   if (r.isCursor()) {
*     // scanning is not end, it is a cursor, save its row key and close scanner if you want,
*     // or just continue the loop to call next().
*   } else {
*     // just like before
*   }
* }
* // scanning is end
* }
* {@link Scan#setNeedCursorResult(boolean)} {@link Cursor} {@link #getCursor()}
*/
public boolean isCursor() {
return cursor != null;
} | 3.68 |
hbase_AvlUtil_getFirst | /**
* Return the first node of the tree.
* @param root the current root of the tree
* @return the first (min) node of the tree
*/
public static <TNode extends AvlNode> TNode getFirst(TNode root) {
if (root != null) {
while (root.avlLeft != null) {
root = (TNode) root.avlLeft;
}
}
return root;
} | 3.68 |
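The getFirst helper above is a plain leftmost descent: keep following left children until there are none. A minimal standalone sketch with a hypothetical Node class (not the HBase AvlNode):

```java
public class LeftmostDemo {
    // Hypothetical BST node; `left` stands in for AvlNode's avlLeft pointer
    static final class Node {
        final int key;
        Node left, right;
        Node(int key) { this.key = key; }
    }

    // Walk left pointers until there are none: that node holds the minimum key
    static Node getFirst(Node root) {
        if (root != null) {
            while (root.left != null) {
                root = root.left;
            }
        }
        return root;
    }

    public static void main(String[] args) {
        Node root = new Node(5);
        root.left = new Node(3);
        root.left.left = new Node(1);
        root.right = new Node(8);
        System.out.println(getFirst(root).key); // 1
    }
}
```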
framework_Design_classNameToElementName | /**
* Creates the name of the html tag corresponding to the given class
* name. The name is derived by converting each uppercase letter to
* lowercase and inserting a dash before the letter. No dash is inserted
* before the first letter of the class name.
*
* @param className
* the name of the class without a package name
* @return the html tag name corresponding to className
*/
private String classNameToElementName(String className) {
StringBuilder result = new StringBuilder();
for (int i = 0; i < className.length(); i++) {
Character c = className.charAt(i);
if (Character.isUpperCase(c)) {
if (i > 0) {
result.append('-');
}
result.append(Character.toLowerCase(c));
} else {
result.append(c);
}
}
return result.toString();
} | 3.68 |
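For instance, the conversion above turns a class name such as "VerticalLayout" into the tag name "vertical-layout". A self-contained sketch of the same loop, outside the Vaadin class, to show the input/output behaviour:

```java
public class TagNameDemo {
    // Same algorithm: dash before each uppercase letter except the first, then lowercase it
    static String classNameToElementName(String className) {
        StringBuilder result = new StringBuilder();
        for (int i = 0; i < className.length(); i++) {
            char c = className.charAt(i);
            if (Character.isUpperCase(c)) {
                if (i > 0) {
                    result.append('-');
                }
                result.append(Character.toLowerCase(c));
            } else {
                result.append(c);
            }
        }
        return result.toString();
    }

    public static void main(String[] args) {
        System.out.println(classNameToElementName("VerticalLayout")); // vertical-layout
        System.out.println(classNameToElementName("Button"));         // button
    }
}
```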
hbase_DefaultCompactor_compact | /**
* Do a minor/major compaction on an explicit set of storefiles from a Store.
*/
public List<Path> compact(final CompactionRequestImpl request,
ThroughputController throughputController, User user) throws IOException {
return compact(request, defaultScannerFactory, writerFactory, throughputController, user);
} | 3.68 |
flink_RecordMapperWrapperRecordIterator_wrapReader | /**
* Wrap a {@link BulkFormat.Reader} applying a {@link RecordMapper} on the returned iterator.
*
* @param <I> Input type
* @param <O> Mapped output type
*/
public static <I, O> BulkFormat.Reader<O> wrapReader(
BulkFormat.Reader<I> wrappedReader, RecordMapper<I, O> recordMapper) {
return new BulkFormat.Reader<O>() {
@Nullable
@Override
public BulkFormat.RecordIterator<O> readBatch() throws IOException {
BulkFormat.RecordIterator<I> iterator = wrappedReader.readBatch();
if (iterator == null) {
return null;
}
return new RecordMapperWrapperRecordIterator<>(iterator, recordMapper);
}
@Override
public void close() throws IOException {
wrappedReader.close();
}
};
} | 3.68 |
querydsl_AliasFactory_createAliasForVariable | /**
* Create an alias instance for the given class and variable name
*
* @param <A>
* @param cl type for alias
* @param var variable name for the underlying expression
* @return alias instance
*/
public <A> A createAliasForVariable(Class<A> cl, String var) {
final Path<A> expr = pathFactory.createEntityPath(cl, PathMetadataFactory.forVariable(var));
return createAliasForExpr(cl, expr);
} | 3.68 |
hadoop_OBSInputStream_checkNotClosed | /**
* Verify that the input stream is open. Non blocking; this gives the last
* state of the volatile {@link #closed} field.
*
* @throws IOException if the connection is closed.
*/
private void checkNotClosed() throws IOException {
if (closed) {
throw new IOException(
uri + ": " + FSExceptionMessages.STREAM_IS_CLOSED);
}
} | 3.68 |
graphhopper_GHRequest_setAlgorithm | /**
* For possible values see AlgorithmOptions.*
*/
public GHRequest setAlgorithm(String algo) {
if (algo != null)
this.algo = Helper.camelCaseToUnderScore(algo);
return this;
} | 3.68 |
flink_DiskCacheManager_getBufferIndex | /**
* Return the current buffer index.
*
* @param subpartitionId the target subpartition id
* @return the finished buffer index
*/
int getBufferIndex(int subpartitionId) {
return subpartitionCacheManagers[subpartitionId].getBufferIndex();
} | 3.68 |
hadoop_PipesPartitioner_setNextPartition | /**
* Set the next key to have the given partition.
* @param newValue the next partition value
*/
static void setNextPartition(int newValue) {
CACHE.set(newValue);
} | 3.68 |
hbase_AsyncTable_checkAndMutateAll | /**
* A simple version of batch checkAndMutate. It will fail if there are any failures.
* @param checkAndMutates The list of rows to apply.
* @return A {@link CompletableFuture} that wraps the result list.
*/
default CompletableFuture<List<CheckAndMutateResult>>
checkAndMutateAll(List<CheckAndMutate> checkAndMutates) {
return allOf(checkAndMutate(checkAndMutates));
} | 3.68 |
morf_AliasedField_as | /**
* Specifies the alias to use for the field.
*
* <p>TODO uses transitional immutable logic. When this can be removed,
* we can make {@code alias final}.</p>
*
* @param aliasName the name of the alias
* @return an updated {@link AliasedField} (this will not be a new object)
*/
@Override
public AliasedField as(String aliasName) {
if (immutableDslEnabled()) {
return shallowCopy(aliasName);
} else {
this.alias = aliasName;
return this;
}
} | 3.68 |
druid_ZookeeperNodeRegister_deregister | /**
* Close the current GroupMember.
*/
public void deregister() {
if (member != null) {
member.close();
member = null;
}
if (client != null && privateZkClient) {
client.close();
}
} | 3.68 |
framework_GridLayout_removeAllComponents | /*
* Removes all components from this container.
*
* @see com.vaadin.ui.ComponentContainer#removeAllComponents()
*/
@Override
public void removeAllComponents() {
super.removeAllComponents();
cursorX = 0;
cursorY = 0;
} | 3.68 |
querydsl_SQLExpressions_addDays | /**
* Add the given amount of days to the date
*
* @param date date
* @param days days to add
* @return converted date
*/
public static <D extends Comparable> DateExpression<D> addDays(DateExpression<D> date, int days) {
return Expressions.dateOperation(date.getType(), Ops.DateTimeOps.ADD_DAYS, date, ConstantImpl.create(days));
} | 3.68 |
hbase_HMaster_startProcedureExecutor | // will be overridden in UT
protected void startProcedureExecutor() throws IOException {
procedureExecutor.startWorkers();
} | 3.68 |
hudi_S3EventsMetaSelector_getNextEventsFromQueue | /**
* Get the list of events from queue.
*
* @param lastCheckpointStr The last checkpoint instant string, empty if first run.
* @return A pair of dataset of event records and the next checkpoint instant string.
*/
public Pair<List<String>, String> getNextEventsFromQueue(SqsClient sqs,
Option<String> lastCheckpointStr,
List<Message> processedMessages) {
processedMessages.clear();
log.info("Reading messages....");
try {
log.info("Start Checkpoint : " + lastCheckpointStr);
List<Map<String, Object>> eventRecords = getValidEvents(sqs, processedMessages);
log.info("Number of valid events: " + eventRecords.size());
List<String> filteredEventRecords = new ArrayList<>();
long newCheckpointTime = eventRecords.stream()
.mapToLong(eventRecord -> Date.from(Instant.from(
DateTimeFormatter.ISO_INSTANT.parse((String) eventRecord.get(S3_MODEL_EVENT_TIME))))
.getTime()).max().orElse(lastCheckpointStr.map(Long::parseLong).orElse(0L));
for (Map<String, Object> eventRecord : eventRecords) {
filteredEventRecords.add(new ObjectMapper().writeValueAsString(eventRecord).replace("%3D", "=")
.replace("%24", "$").replace("%A3", "£").replace("%23", "#").replace("%26", "&").replace("%3F", "?")
.replace("%7E", "~").replace("%25", "%").replace("%2B", "+"));
}
// Return the old checkpoint if no messages to consume from queue.
String newCheckpoint = newCheckpointTime == 0 ? lastCheckpointStr.orElse(null) : String.valueOf(newCheckpointTime);
return new ImmutablePair<>(filteredEventRecords, newCheckpoint);
} catch (JSONException | IOException e) {
throw new HoodieException("Unable to read from SQS: ", e);
}
} | 3.68 |
hadoop_RenameOperation_initiateCopy | /**
* Initiate a copy operation in the executor.
* @param source status of the source object.
* @param key source key
* @param newDestKey destination key
* @param childDestPath destination path.
* @return the future.
*/
protected CompletableFuture<Path> initiateCopy(
final S3ALocatedFileStatus source,
final String key,
final String newDestKey,
final Path childDestPath) {
S3ObjectAttributes sourceAttributes =
callbacks.createObjectAttributes(
source.getPath(),
source.getEtag(),
source.getVersionId(),
source.getLen());
// queue the copy operation for execution in the thread pool
return submit(getStoreContext().getExecutor(),
callableWithinAuditSpan(getAuditSpan(), () ->
copySource(
key,
sourceAttributes,
callbacks.createReadContext(source),
childDestPath,
newDestKey)));
} | 3.68 |
druid_MySqlStatementParser_parseSelectInto | /**
* parse select into
*/
public MySqlSelectIntoStatement parseSelectInto() {
MySqlSelectIntoParser parse = new MySqlSelectIntoParser(this.exprParser);
return parse.parseSelectInto();
} | 3.68 |
hadoop_ZKSignerSecretProvider_createCuratorClient | /**
* This method creates the Curator client and connects to ZooKeeper.
* @param config configuration properties
* @return A Curator client
*/
protected CuratorFramework createCuratorClient(Properties config) {
String connectionString = config.getProperty(ZOOKEEPER_CONNECTION_STRING, "localhost:2181");
String authType = config.getProperty(ZOOKEEPER_AUTH_TYPE, "none");
String keytab = config.getProperty(ZOOKEEPER_KERBEROS_KEYTAB, "").trim();
String principal = config.getProperty(ZOOKEEPER_KERBEROS_PRINCIPAL, "").trim();
boolean sslEnabled = Boolean.parseBoolean(config.getProperty(ZOOKEEPER_SSL_ENABLED, "false"));
String keystoreLocation = config.getProperty(ZOOKEEPER_SSL_KEYSTORE_LOCATION, "");
String keystorePassword = config.getProperty(ZOOKEEPER_SSL_KEYSTORE_PASSWORD, "");
String truststoreLocation = config.getProperty(ZOOKEEPER_SSL_TRUSTSTORE_LOCATION, "");
String truststorePassword = config.getProperty(ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD, "");
CuratorFramework zkClient =
ZookeeperClient.configure()
.withConnectionString(connectionString)
.withAuthType(authType)
.withKeytab(keytab)
.withPrincipal(principal)
.withJaasLoginEntryName(JAAS_LOGIN_ENTRY_NAME)
.enableSSL(sslEnabled)
.withKeystore(keystoreLocation)
.withKeystorePassword(keystorePassword)
.withTruststore(truststoreLocation)
.withTruststorePassword(truststorePassword)
.create();
zkClient.start();
return zkClient;
} | 3.68 |
hadoop_MutableGaugeInt_toString | /**
* @return the value of the metric
*/
public String toString() {
return value.toString();
} | 3.68 |
hadoop_RegistryPathUtils_encodeForRegistry | /**
* Perform any formatting for the registry needed to convert
* non-simple-DNS elements
* @param element element to encode
* @return an encoded string
*/
public static String encodeForRegistry(String element) {
return IDN.toASCII(element);
} | 3.68 |
hbase_RecoveredEditsOutputSink_getRecoveredEditsWriter | /**
* Get a writer and path for a log starting at the given entry. This function is threadsafe so
* long as multiple threads are always acting on different regions.
* @return null if this region shouldn't output any logs
*/
private RecoveredEditsWriter getRecoveredEditsWriter(TableName tableName, byte[] region,
long seqId) throws IOException {
RecoveredEditsWriter ret = writers.get(Bytes.toString(region));
if (ret != null) {
return ret;
}
ret = createRecoveredEditsWriter(tableName, region, seqId);
if (ret == null) {
return null;
}
LOG.trace("Created {}", ret.path);
writers.put(Bytes.toString(region), ret);
return ret;
} | 3.68 |
hibernate-validator_MappingXmlParser_parse | /**
* Parses the given set of input stream representing XML constraint
* mappings.
*
* @param mappingStreams The streams to parse. Must support the mark/reset contract.
*/
public final void parse(Set<InputStream> mappingStreams) {
ClassLoader previousTccl = run( GetClassLoader.fromContext() );
try {
run( SetContextClassLoader.action( MappingXmlParser.class.getClassLoader() ) );
Set<String> alreadyProcessedConstraintDefinitions = newHashSet();
for ( InputStream in : mappingStreams ) {
// the InputStreams passed in parameters support mark and reset
in.mark( Integer.MAX_VALUE );
XMLEventReader xmlEventReader = xmlParserHelper.createXmlEventReader( "constraint mapping file", new CloseIgnoringInputStream( in ) );
String schemaVersion = xmlParserHelper.getSchemaVersion( "constraint mapping file", xmlEventReader );
xmlEventReader.close();
in.reset();
// The validation is done first as StAX builders used below are assuming that the XML file is correct and don't
// do any validation of the input.
String schemaResourceName = getSchemaResourceName( schemaVersion );
Schema schema = xmlParserHelper.getSchema( schemaResourceName );
if ( schema == null ) {
throw LOG.unableToGetXmlSchema( schemaResourceName );
}
Validator validator = schema.newValidator();
validator.validate( new StreamSource( new CloseIgnoringInputStream( in ) ) );
in.reset();
ConstraintMappingsStaxBuilder constraintMappingsStaxBuilder = new ConstraintMappingsStaxBuilder(
classLoadingHelper, constraintCreationContext,
annotationProcessingOptions, javaBeanHelper, defaultSequences
);
xmlEventReader = xmlParserHelper.createXmlEventReader( "constraint mapping file", new CloseIgnoringInputStream( in ) );
while ( xmlEventReader.hasNext() ) {
constraintMappingsStaxBuilder.process( xmlEventReader, xmlEventReader.nextEvent() );
}
// at this point we only build the constraint definitions.
// we want to fully populate the constraint helper and get the final rules for which
// validators will be applied before we build any constrained elements that contribute to
// final bean metadata.
constraintMappingsStaxBuilder.buildConstraintDefinitions( alreadyProcessedConstraintDefinitions );
// we only add the builder to process it later if it has anything related to bean's constraints,
// otherwise it was only about constraint definition, and we've processed it already.
if ( constraintMappingsStaxBuilder.hasBeanBuilders() ) {
mappingBuilders.add( constraintMappingsStaxBuilder );
}
xmlEventReader.close();
in.reset();
}
}
catch (IOException | XMLStreamException | SAXException e) {
throw LOG.getErrorParsingMappingFileException( e );
}
finally {
run( SetContextClassLoader.action( previousTccl ) );
}
} | 3.68 |
hbase_ProcedureExecutor_abort | /**
* Send an abort notification to the specified procedure. Depending on the procedure
* implementation, the abort can be considered or ignored.
* @param procId the procedure to abort
* @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
* @return true if the procedure exists and has received the abort, otherwise false.
*/
public boolean abort(long procId, boolean mayInterruptIfRunning) {
Procedure<TEnvironment> proc = procedures.get(procId);
if (proc != null) {
if (!mayInterruptIfRunning && proc.wasExecuted()) {
return false;
}
return proc.abort(getEnvironment());
}
return false;
} | 3.68 |
flink_ExecutionEnvironment_fromElements | /**
* Creates a new data set that contains the given elements. The framework will determine the
* type according to the base type the user supplied. The elements must be instances of the base
* type or of a subclass of it. The sequence of elements must not be empty. Note that this
* operation will result in a non-parallel data source, i.e. a data source with a parallelism of
* one.
*
* @param type The base class type for every element in the collection.
* @param data The elements to make up the data set.
* @return A DataSet representing the given list of elements.
*/
@SafeVarargs
public final <X> DataSource<X> fromElements(Class<X> type, X... data) {
if (data == null) {
throw new IllegalArgumentException("The data must not be null.");
}
if (data.length == 0) {
throw new IllegalArgumentException("The number of elements must not be zero.");
}
TypeInformation<X> typeInfo;
try {
typeInfo = TypeExtractor.getForClass(type);
} catch (Exception e) {
throw new RuntimeException(
"Could not create TypeInformation for type "
+ type.getName()
+ "; please specify the TypeInformation manually via "
+ "ExecutionEnvironment#fromElements(Collection, TypeInformation)",
e);
}
return fromCollection(Arrays.asList(data), typeInfo, Utils.getCallLocationName());
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_removeJobNodeIfExisted | /**
* Remove job node if existed.
*
* @param node node
*/
public void removeJobNodeIfExisted(final String node) {
if (isJobNodeExisted(node)) {
regCenter.remove(jobNodePath.getFullPath(node));
}
} | 3.68 |
pulsar_ThreadLeakDetectorListener_extractRunnableTarget | // use reflection to extract the Runnable target from a thread so that we can detect threads created by
// Testcontainers based on the Runnable's class name.
private static Runnable extractRunnableTarget(Thread thread) {
if (THREAD_TARGET_FIELD == null) {
return null;
}
Runnable target = null;
try {
target = (Runnable) THREAD_TARGET_FIELD.get(thread);
} catch (IllegalAccessException e) {
LOG.warn("Cannot access target field in Thread.class", e);
}
return target;
} | 3.68 |
hudi_HoodieRecordPayload_preCombine | /**
* When more than one HoodieRecord have the same HoodieKey in the incoming batch, this function combines them before attempting to insert/upsert by taking in a schema.
* Implementation can leverage the schema to decide their business logic to do preCombine.
*
* @param oldValue instance of the old {@link HoodieRecordPayload} to be combined with.
* @param schema Payload related schema. For example, use the schema to overwrite the old instance for specified fields that don't equal their default values.
* @param properties Payload related properties. For example pass the ordering field(s) name to extract from value in storage.
* @return the combined value
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
default T preCombine(T oldValue, Schema schema, Properties properties) {
return preCombine(oldValue, properties);
} | 3.68 |
hadoop_CommitUtils_getS3AFileSystem | /**
* Get the S3A FS of a path.
* @param path path to examine
* @param conf config
* @param magicCommitRequired is magic complete required in the FS?
* @return the filesystem
* @throws PathCommitException output path isn't to an S3A FS instance, or
* if {@code magicCommitRequired} is set, if doesn't support these commits.
* @throws IOException failure to instantiate the FS
*/
public static S3AFileSystem getS3AFileSystem(Path path,
Configuration conf,
boolean magicCommitRequired)
throws PathCommitException, IOException {
S3AFileSystem s3AFS = verifyIsS3AFS(path.getFileSystem(conf), path);
if (magicCommitRequired) {
verifyIsMagicCommitFS(s3AFS);
}
return s3AFS;
} | 3.68 |
hbase_RegionStates_getRegionsOfTableForEnabling | /**
* Get the regions for enabling a table.
* <p/>
* Here we want the EnableTableProcedure to be more robust and can be used to fix some nasty
* states, so the checks in this method will be a bit strange. In general, a region can only be
* offline when it is split, for merging we will just delete the parent regions, but with HBCK we
* may force update the state of a region to fix some nasty bugs, so in this method we will try to
* bring the offline regions back if it is not split. That's why we only check for split state
* here.
*/
public List<RegionInfo> getRegionsOfTableForEnabling(TableName table) {
return getRegionsOfTable(table,
regionNode -> !regionNode.isInState(State.SPLIT) && !regionNode.getRegionInfo().isSplit());
} | 3.68 |
hbase_LruAdaptiveBlockCache_evictBlocksByHfileName | /**
* Evicts all blocks for a specific HFile. This is an expensive operation implemented as a
* linear-time search through all blocks in the cache. Ideally this should be a search in a
* log-access-time map.
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
int numEvicted = (int) map.keySet().stream().filter(key -> key.getHfileName().equals(hfileName))
.filter(this::evictBlock).count();
if (victimHandler != null) {
numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
}
return numEvicted;
} | 3.68 |
graphhopper_OSMReaderConfig_setMaxWayPointDistance | /**
* This parameter affects the routine used to simplify the edge geometries (Ramer-Douglas-Peucker). Higher values mean
* more details are preserved. The default is 1 (meter). Simplification can be disabled by setting it to 0.
*/
public OSMReaderConfig setMaxWayPointDistance(double maxWayPointDistance) {
this.maxWayPointDistance = maxWayPointDistance;
return this;
} | 3.68 |
framework_DragAndDropEvent_getTransferable | /**
* @return the Transferable instance representing the data dragged in this
* drag and drop event
*/
public Transferable getTransferable() {
return transferable;
} | 3.68 |
framework_LoginForm_getLoginParameter | /**
* Gets the login parameter with the given name.
*
* @param name
* the name of the parameter
* @return the value of the parameter or null if no such parameter is
* present
*/
public String getLoginParameter(String name) {
return params.get(name);
} | 3.68 |
graphhopper_OSMReader_processRelation | /**
* This method is called for each relation during the second pass of {@link WaySegmentParser}
* We use it to save the relations and process them afterwards.
*/
protected void processRelation(ReaderRelation relation, LongToIntFunction getIdForOSMNodeId) {
if (turnCostStorage != null)
if (RestrictionConverter.isTurnRestriction(relation)) {
long osmViaNode = RestrictionConverter.getViaNodeIfViaNodeRestriction(relation);
if (osmViaNode >= 0) {
int viaNode = getIdForOSMNodeId.applyAsInt(osmViaNode);
// only include the restriction if the corresponding node wasn't excluded
if (viaNode >= 0) {
relation.setTag("graphhopper:via_node", viaNode);
restrictionRelations.add(relation);
}
} else
// not a via-node restriction -> simply add it as is
restrictionRelations.add(relation);
}
} | 3.68 |
hudi_Registry_getAllCounts | /**
* Get all Counter type metrics.
*/
default Map<String, Long> getAllCounts() {
return getAllCounts(false);
} | 3.68 |
hbase_AccessController_requireScannerOwner | /**
* Verify, when servicing an RPC, that the caller is the scanner owner. If so, we assume that
* access control is correctly enforced based on the checks performed in preScannerOpen()
*/
private void requireScannerOwner(InternalScanner s) throws AccessDeniedException {
if (!RpcServer.isInRpcCallContext()) {
return;
}
String requestUserName = RpcServer.getRequestUserName().orElse(null);
String owner = scannerOwners.get(s);
if (authorizationEnabled && owner != null && !owner.equals(requestUserName)) {
throw new AccessDeniedException("User '" + requestUserName + "' is not the scanner owner!");
}
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_updateBlockSequenceTracker | /**
* Updates map tracking block seq no.
* Here is the map structure.
* Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit
* Key: Commit time.
* Value: Map<Long, List<Pair<Integer, HoodieLogBlock>>>>
* Value refers to a Map of different attempts for the commit of interest. List contains the block seq number and the resp HoodieLogBlock.
*
* For example, if there were two attempts for a file slice while writing (due to Spark task retries), here is how the map might look:
* key: commit1
* value : {
* 0L = List = { {0, lb1}, {1, lb2} },
* 1L = List = { {0, lb3}, {1, lb4}, {2, lb5}}
* }
* Meaning: for commit1, there were two attempts with the Append Handle while writing. In the first attempt, lb1 and lb2 were added, and in the second attempt lb3, lb4 and lb5 were added.
* We keep populating this entire map and finally detect spurious log blocks and ignore them.
* In most cases, we might just see one set of sequence for a given commit.
*
* @param logBlock log block of interest to be added.
* @param instantTime commit time of interest.
* @param blockSeqNumber block sequence number.
* @param blockSequenceMapPerCommit map tracking per commit block sequences.
*/
private void updateBlockSequenceTracker(HoodieLogBlock logBlock, String instantTime, int blockSeqNumber, long attemptNumber,
Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit,
AtomicBoolean blockIdentifiersPresent) {
if (blockSeqNumber != -1 && attemptNumber != -1) { // update the block sequence tracker for log blocks containing the same.
blockIdentifiersPresent.set(true);
blockSequenceMapPerCommit.computeIfAbsent(instantTime, entry -> new HashMap<>());
Map<Long, List<Pair<Integer, HoodieLogBlock>>> curCommitBlockMap = blockSequenceMapPerCommit.get(instantTime);
if (curCommitBlockMap.containsKey(attemptNumber)) {
// append to existing map entry
curCommitBlockMap.get(attemptNumber).add(Pair.of(blockSeqNumber, logBlock));
} else {
// create a new map entry
curCommitBlockMap.put(attemptNumber, new ArrayList<>());
curCommitBlockMap.get(attemptNumber).add(Pair.of(blockSeqNumber, logBlock));
}
// update the latest to block sequence tracker
blockSequenceMapPerCommit.put(instantTime, curCommitBlockMap);
} else {
// all of older blocks are considered valid. there should be only one list for older commits where block sequence number is not present.
blockSequenceMapPerCommit.computeIfAbsent(instantTime, entry -> new HashMap<>());
Map<Long, List<Pair<Integer, HoodieLogBlock>>> curCommitBlockMap = blockSequenceMapPerCommit.get(instantTime);
curCommitBlockMap.computeIfAbsent(0L, entry -> new ArrayList<>());
curCommitBlockMap.get(0L).add(Pair.of(blockSeqNumber, logBlock));
// update the latest to block sequence tracker
blockSequenceMapPerCommit.put(instantTime, curCommitBlockMap);
}
} | 3.68 |
hadoop_MountTableRefresherService_getClientRemover | /**
* Create cache entry remove listener.
*/
private RemovalListener<String, RouterClient> getClientRemover() {
return new RemovalListener<String, RouterClient>() {
@Override
public void onRemoval(
RemovalNotification<String, RouterClient> notification) {
closeRouterClient(notification.getValue());
}
};
} | 3.68 |
hbase_SplitTableRegionProcedure_getDaughterRegionIdTimestamp | /**
* Calculate daughter regionid to use.
* @param hri Parent {@link RegionInfo}
* @return Daughter region id (timestamp) to use.
*/
private static long getDaughterRegionIdTimestamp(final RegionInfo hri) {
long rid = EnvironmentEdgeManager.currentTime();
// Regionid is timestamp. Can't be less than that of parent else will insert
// at wrong location in hbase:meta (See HBASE-710).
if (rid < hri.getRegionId()) {
LOG.warn("Clock skew; parent regions id is " + hri.getRegionId()
+ " but current time here is " + rid);
rid = hri.getRegionId() + 1;
}
return rid;
} | 3.68 |
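The clock-skew guard above reduces to max(currentTime, parentRegionId + 1). A small illustration with made-up timestamps (plain Java, not HBase code):

```java
public class DaughterRegionIdDemo {
    // Same rule as getDaughterRegionIdTimestamp: never go below the parent's region id
    static long daughterRegionId(long now, long parentRegionId) {
        long rid = now;
        if (rid < parentRegionId) {
            rid = parentRegionId + 1;
        }
        return rid;
    }

    public static void main(String[] args) {
        // Normal case: the wall clock is ahead of the parent's id
        System.out.println(daughterRegionId(1_700_000_100_000L, 1_700_000_000_000L)); // 1700000100000
        // Clock skew: the wall clock is behind, so fall back to parent id + 1
        System.out.println(daughterRegionId(1_699_999_000_000L, 1_700_000_000_000L)); // 1700000000001
    }
}
```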
zxing_AddressBookResultHandler_mapIndexToAction | // This takes all the work out of figuring out which buttons/actions should be in which
// positions, based on which fields are present in this barcode.
private int mapIndexToAction(int index) {
if (index < buttonCount) {
int count = -1;
for (int x = 0; x < MAX_BUTTON_COUNT; x++) {
if (fields[x]) {
count++;
}
if (count == index) {
return x;
}
}
}
return -1;
} | 3.68 |
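mapIndexToAction above returns the array position of the (index + 1)-th present field. For example, with fields = {true, false, true, true}, button index 1 maps to field position 2. A standalone sketch with hypothetical constants (not the ZXing class itself):

```java
public class IndexToActionDemo {
    static final int MAX_BUTTON_COUNT = 4; // assumed value for the sketch

    // Returns the position of the (index + 1)-th true entry in fields, or -1 if out of range
    static int mapIndexToAction(boolean[] fields, int buttonCount, int index) {
        if (index < buttonCount) {
            int count = -1;
            for (int x = 0; x < MAX_BUTTON_COUNT; x++) {
                if (fields[x]) {
                    count++;
                }
                if (count == index) {
                    return x;
                }
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        boolean[] fields = {true, false, true, true};
        int buttonCount = 3; // number of present fields
        System.out.println(mapIndexToAction(fields, buttonCount, 0)); // 0
        System.out.println(mapIndexToAction(fields, buttonCount, 1)); // 2
        System.out.println(mapIndexToAction(fields, buttonCount, 2)); // 3
    }
}
```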
hbase_BucketAllocator_roundUpToBucketSizeInfo | /**
* Round up the given block size to bucket size, and get the corresponding BucketSizeInfo
*/
public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) {
for (int i = 0; i < bucketSizes.length; ++i)
if (blockSize <= bucketSizes[i]) return bucketSizeInfos[i];
return null;
} | 3.68 |
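Rounding up to a bucket size is a first-fit scan over an ascending size array: with bucket sizes of 5K, 9K and 17K, an 8K block lands in the 9K bucket and a 20K block finds none. A generic sketch using plain sizes instead of HBase's BucketSizeInfo:

```java
public class BucketRoundUpDemo {
    // First bucket size (ascending order assumed) that can hold the block, or -1 if none fits
    static int roundUpToBucketSize(int[] bucketSizes, int blockSize) {
        for (int bucketSize : bucketSizes) {
            if (blockSize <= bucketSize) {
                return bucketSize;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        int[] bucketSizes = {5 * 1024, 9 * 1024, 17 * 1024};
        System.out.println(roundUpToBucketSize(bucketSizes, 8 * 1024));  // 9216 (the 9K bucket)
        System.out.println(roundUpToBucketSize(bucketSizes, 20 * 1024)); // -1 (no bucket fits)
    }
}
```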
hbase_RingBufferEnvelope_load | /**
* Load the Envelope with {@link RpcCall}
* @param namedQueuePayload all details of rpc call that would be useful for ring buffer consumers
*/
public void load(NamedQueuePayload namedQueuePayload) {
this.namedQueuePayload = namedQueuePayload;
} | 3.68 |
framework_WindowWaiAriaRoles_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "The alert window should have the role 'alertdialog' and the regular window should have the role 'dialog'";
} | 3.68 |
hbase_PrettyPrinter_toString | /**
* Pretty prints a collection of any type to a string. Relies on toString() implementation of the
* object type.
* @param collection collection to pretty print.
* @return Pretty printed string for the collection.
*/
public static String toString(Collection<?> collection) {
List<String> stringList = new ArrayList<>();
for (Object o : collection) {
stringList.add(Objects.toString(o));
}
return "[" + String.join(",", stringList) + "]";
} | 3.68 |
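The output for a list of numbers, for instance, is the elements joined by commas inside brackets. A minimal re-creation with only the JDK (behaviour assumed to match the snippet above):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Objects;

public class PrettyPrintDemo {
    // Same approach: Objects.toString each element, then join with "," inside brackets
    static String prettyPrint(Collection<?> collection) {
        List<String> parts = new ArrayList<>();
        for (Object o : collection) {
            parts.add(Objects.toString(o));
        }
        return "[" + String.join(",", parts) + "]";
    }

    public static void main(String[] args) {
        System.out.println(prettyPrint(Arrays.asList(1, 2, 3)));   // [1,2,3]
        System.out.println(prettyPrint(Arrays.asList("a", null))); // [a,null]
    }
}
```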
Activiti_TreeValueExpression_setValue | /**
* Evaluates the expression as an lvalue and assigns the given value.
* @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>)
* and to perform the assignment to the last base/property pair
* @throws ELException if evaluation fails (e.g. property not found, type conversion failed, assignment failed...)
*/
@Override
public void setValue(ELContext context, Object value) throws ELException {
node.setValue(bindings, context, value);
} | 3.68 |
hadoop_ReservationAllocationState_newInstance | /**
*
* @param acceptanceTime The acceptance time of the reservation.
* @param user The username of the user who made the reservation.
* @param resourceAllocations List of {@link ResourceAllocationRequest}
* representing the current state of the
* reservation resource allocations. This is
* subject to change in the event of re-planning.
* @param reservationId {@link ReservationId } of the reservation being
* listed.
* @param reservationDefinition {@link ReservationDefinition} used to make
* the reservation.
* @return {@code ReservationAllocationState} that represents the state of
* the reservation.
*/
@Public
@Stable
public static ReservationAllocationState newInstance(long acceptanceTime,
String user, List<ResourceAllocationRequest> resourceAllocations,
ReservationId reservationId,
ReservationDefinition reservationDefinition) {
ReservationAllocationState ri = Records.newRecord(
ReservationAllocationState.class);
ri.setAcceptanceTime(acceptanceTime);
ri.setUser(user);
ri.setResourceAllocationRequests(resourceAllocations);
ri.setReservationId(reservationId);
ri.setReservationDefinition(reservationDefinition);
return ri;
} | 3.68 |
flink_PermanentBlobCache_getStorageLocation | /**
* Returns a file handle to the file associated with the given blob key on the blob server.
*
* @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param key identifying the file
* @return file handle to the file
* @throws IOException if creating the directory fails
*/
@VisibleForTesting
public File getStorageLocation(JobID jobId, BlobKey key) throws IOException {
checkNotNull(jobId);
return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key);
} | 3.68 |
hbase_RegionCoprocessorHost_preFlush | /**
* Invoked before a memstore flush
*/
public void preFlush(FlushLifeCycleTracker tracker) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preFlush(this, tracker);
}
});
} | 3.68 |
morf_CaseInsensitiveString_readResolve | /**
* When deserializing, resolve via the static factory. This prevents us getting duplicate
* instances.
*
* @return The interned instance.
*/
private Object readResolve() {
return of(string);
} | 3.68 |
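readResolve is the standard Java serialization hook behind this pattern: whatever it returns replaces the freshly deserialized object, so routing it through the interning factory guarantees one instance per value. A self-contained sketch of the idiom with a hypothetical Interned class (not the morf type):

```java
import java.io.Serializable;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class Interned implements Serializable {
    private static final long serialVersionUID = 1L;
    private static final Map<String, Interned> CACHE = new ConcurrentHashMap<>();

    private final String value;

    private Interned(String value) { this.value = value; }

    // Static factory that hands out exactly one instance per distinct value
    public static Interned of(String value) {
        return CACHE.computeIfAbsent(value, Interned::new);
    }

    // Called during deserialization; resolving via of() prevents duplicate instances
    private Object readResolve() {
        return of(value);
    }

    @Override
    public String toString() { return value; }
}
```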
flink_Over_partitionBy | /**
* Partitions the elements on some partition keys.
*
* <p>Each partition is individually sorted and aggregate functions are applied to each
* partition separately.
*
* @param partitionBy list of field references
* @return an over window with defined partitioning
*/
public static OverWindowPartitioned partitionBy(Expression... partitionBy) {
return new OverWindowPartitioned(Arrays.asList(partitionBy));
} | 3.68 |
hibernate-validator_ResourceBundleMessageInterpolator_canLoadExpressionFactory | /**
* Instead of testing the different class loaders via {@link ELManager}, we directly access the
* {@link ExpressionFactory}. This avoids issues with loading the {@code ELUtil} class (used by {@code ELManager})
* after a failed attempt.
*/
private static boolean canLoadExpressionFactory() {
try {
ExpressionFactory.newInstance();
return true;
}
catch (Throwable e) {
LOG.debugv( e, "Failed to load expression factory via classloader {0}",
run( GetClassLoader.fromContext() ) );
return false;
}
} | 3.68 |
pulsar_PulsarAdminImpl_clusters | /**
* @return the clusters management object
*/
public Clusters clusters() {
return clusters;
} | 3.68 |
dubbo_AbstractConnectionClient_release | /**
* Decreases the reference count by 1 and calls {@link this#destroy} if the reference count reaches 0.
*/
public final boolean release() {
long remainingCount = COUNTER_UPDATER.decrementAndGet(this);
if (remainingCount == 0) {
destroy();
return true;
} else if (remainingCount <= -1) {
logger.warn(PROTOCOL_ERROR_CLOSE_CLIENT, "", "", "This instance has been destroyed");
return false;
} else {
return false;
}
} | 3.68 |
flink_ProjectOperator_projectTuple10 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>
ProjectOperator<T, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>
projectTuple10() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType =
new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes);
return new ProjectOperator<T, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(
this.ds, this.fieldIndexes, tType);
} | 3.68 |
pulsar_ClassLoaderUtils_loadJar | /**
* Load a jar.
*
* @param jar file of jar
* @return classloader
* @throws MalformedURLException
*/
public static ClassLoader loadJar(File jar) throws MalformedURLException {
java.net.URL url = jar.toURI().toURL();
return AccessController.doPrivileged(
(PrivilegedAction<URLClassLoader>) () -> new URLClassLoader(new URL[]{url}));
} | 3.68 |
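A hedged usage sketch of the same pattern with the JDK's URLClassLoader directly; the jar path and class name below are placeholders, not values from the Pulsar source:

```java
import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;

public class LoadJarDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder jar path and class name; replace with real ones
        File jar = new File("/tmp/plugin.jar");
        URL url = jar.toURI().toURL();
        try (URLClassLoader loader = new URLClassLoader(new URL[]{url})) {
            Class<?> pluginClass = loader.loadClass("com.example.Plugin");
            System.out.println("Loaded " + pluginClass.getName());
        }
    }
}
```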
morf_AddColumn_getTableName | /**
* @return the name of the table to add the column to.
*/
public String getTableName() {
return tableName;
} | 3.68 |
Activiti_TablePage_getRows | /**
* @return the actual table content.
*/
public List<Map<String, Object>> getRows() {
return rowData;
} | 3.68 |
hadoop_Event_getSymlinkTarget | /**
* Symlink target is null if the CreateEvent iNodeType is not symlink.
*/
public String getSymlinkTarget() {
return symlinkTarget;
} | 3.68 |
hadoop_CryptoUtils_wrapIfNecessary | /**
* Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
* data buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* @param conf configuration
* @param in given input stream
* @return FSDataInputStream encrypted input stream if encryption is
* enabled; otherwise the given input stream itself
* @throws IOException exception in case of error
*/
public static FSDataInputStream wrapIfNecessary(Configuration conf,
FSDataInputStream in) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
int bufferSize = getBufferSize(conf);
// Not going to be used... but still has to be read...
// Since the O/P stream always writes it..
IOUtils.readFully(in, new byte[8], 0, 8);
byte[] iv =
new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
IOUtils.readFully(in, iv, 0,
cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
if (LOG.isDebugEnabled()) {
LOG.debug("IV read from Stream ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize,
getEncryptionKey(), iv);
} else {
return in;
}
} | 3.68 |
hadoop_CloseableTaskPoolSubmitter_getPool | /**
* Get the pool.
* @return the pool.
*/
public ExecutorService getPool() {
return pool;
} | 3.68 |
hadoop_AclUtil_getAclFromPermAndEntries | /**
* Given permissions and extended ACL entries, returns the full logical ACL.
*
* @param perm FsPermission containing permissions
* @param entries List<AclEntry> containing extended ACL entries
* @return List<AclEntry> containing full logical ACL
*/
public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
List<AclEntry> entries) {
List<AclEntry> acl = Lists.newArrayListWithCapacity(entries.size() + 3);
// Owner entry implied by owner permission bits.
acl.add(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build());
// All extended access ACL entries.
boolean hasAccessAcl = false;
Iterator<AclEntry> entryIter = entries.iterator();
AclEntry curEntry = null;
while (entryIter.hasNext()) {
curEntry = entryIter.next();
if (curEntry.getScope() == AclEntryScope.DEFAULT) {
break;
}
hasAccessAcl = true;
acl.add(curEntry);
}
// Mask entry implied by group permission bits, or group entry if there is
// no access ACL (only default ACL).
acl.add(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build());
// Other entry implied by other bits.
acl.add(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
// Default ACL entries.
if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
acl.add(curEntry);
while (entryIter.hasNext()) {
acl.add(entryIter.next());
}
}
return acl;
} | 3.68 |
flink_AbstractBytesHashMap_getEntryIterator | /** Returns an iterator for iterating over the entries of this map. */
@SuppressWarnings("WeakerAccess")
public KeyValueIterator<K, BinaryRowData> getEntryIterator(boolean requiresCopy) {
if (destructiveIterator != null) {
throw new IllegalArgumentException(
"DestructiveIterator is not null, so this method can't be invoke!");
}
return ((RecordArea) recordArea).entryIterator(requiresCopy);
} | 3.68 |
hudi_HoodieAppendHandle_needsUpdateLocation | /**
* Whether there is need to update the record location.
*/
protected boolean needsUpdateLocation() {
return true;
} | 3.68 |
hbase_BlockType_isIndex | /** Returns whether this block category is index */
public final boolean isIndex() {
return this.getCategory() == BlockCategory.INDEX;
} | 3.68 |
hbase_MiniHBaseCluster_getServerWithMeta | /**
* @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} of HRS carrying
* regionName. Returns -1 if none found.
*/
public int getServerWithMeta() {
return getServerWith(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
} | 3.68 |
hudi_BaseHoodieTableServiceClient_rollbackFailedIndexingCommits | /**
* Rolls back the failed delta commits corresponding to the indexing action.
* Such delta commits are identified based on the suffix `METADATA_INDEXER_TIME_SUFFIX` ("004").
* <p>
* TODO(HUDI-5733): This should be cleaned up once the proper fix of rollbacks
* in the metadata table is landed.
*
* @return {@code true} if rollback happens; {@code false} otherwise.
*/
protected boolean rollbackFailedIndexingCommits() {
HoodieTable table = createTable(config, hadoopConf);
List<String> instantsToRollback = getFailedIndexingCommitsToRollback(table.getMetaClient());
Map<String, Option<HoodiePendingRollbackInfo>> pendingRollbacks = getPendingRollbackInfos(table.getMetaClient());
instantsToRollback.forEach(entry -> pendingRollbacks.putIfAbsent(entry, Option.empty()));
rollbackFailedWrites(pendingRollbacks);
return !pendingRollbacks.isEmpty();
} | 3.68 |
hbase_MasterObserver_postListRSGroups | /**
* Called after listing region server group information.
* @param ctx the environment to interact with the framework and master
*/
default void postListRSGroups(final ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
flink_StateHandleStoreUtils_serializeOrDiscard | /**
* Serializes the passed {@link StateObject} and discards the state in case of failure.
*
* @param stateObject the {@code StateObject} that shall be serialized.
* @return The serialized version of the passed {@code StateObject}.
* @throws Exception if an error occurred during the serialization. The corresponding {@code
* StateObject} will be discarded in that case.
*/
public static byte[] serializeOrDiscard(StateObject stateObject) throws Exception {
try {
return InstantiationUtil.serializeObject(stateObject);
} catch (Exception e) {
try {
stateObject.discardState();
} catch (Exception discardException) {
e.addSuppressed(discardException);
}
ExceptionUtils.rethrowException(e);
}
// will never happen but is added to please the compiler
return new byte[0];
} | 3.68 |
flink_EnvironmentSettings_getBuiltInCatalogName | /**
* Gets the specified name of the initial catalog to be created when instantiating a {@link
* TableEnvironment}.
*/
public String getBuiltInCatalogName() {
return configuration.get(TABLE_CATALOG_NAME);
} | 3.68 |
morf_FieldLiteral_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
FieldLiteral other = (FieldLiteral) obj;
return new EqualsBuilder()
.appendSuper(super.equals(obj))
.append(this.value, other.value)
.append(this.dataType, other.dataType)
.isEquals();
} | 3.68 |
morf_WindowFunction_getFunction | /**
* @return the function.
*/
public Function getFunction() {
return function;
} | 3.68 |
hadoop_CachedSASToken_update | /**
* Updates the cached SAS token and expiry. If the token is invalid, the cached value
* is cleared by setting it to null and the expiry to MIN.
* @param token an Azure Storage SAS token
*/
public void update(String token) {
// quickly return if token and cached sasToken are the same reference
// Note: use of operator == is intentional
if (token == sasToken) {
return;
}
OffsetDateTime newExpiry = getExpiry(token);
boolean isInvalid = isNearExpiry(newExpiry, minExpirationInSeconds);
synchronized (this) {
if (isInvalid) {
sasToken = null;
sasExpiry = OffsetDateTime.MIN;
} else {
sasToken = token;
sasExpiry = newExpiry;
}
}
} | 3.68 |
querydsl_ExpressionUtils_regexToLike | /**
* Convert the given expression from regex form to like
*
* @param expr expression to convert
* @return converted expression
*/
@SuppressWarnings("unchecked")
public static Expression<String> regexToLike(Expression<String> expr) {
if (expr instanceof Constant<?>) {
final String str = expr.toString();
final StringBuilder rv = new StringBuilder(str.length() + 2);
boolean escape = false;
for (int i = 0; i < str.length(); i++) {
final char ch = str.charAt(i);
if (!escape && ch == '.') {
if (i < str.length() - 1 && str.charAt(i + 1) == '*') {
rv.append('%');
i++;
} else {
rv.append('_');
}
continue;
} else if (!escape && ch == '\\') {
escape = true;
continue;
} else if (!escape && (ch == '[' || ch == ']' || ch == '^' || ch == '.' || ch == '*')) {
throw new QueryException("'" + str + "' can't be converted to like form");
} else if (escape && (ch == 'd' || ch == 'D' || ch == 's' || ch == 'S' || ch == 'w' || ch == 'W')) {
throw new QueryException("'" + str + "' can't be converted to like form");
}
rv.append(ch);
escape = false;
}
if (!rv.toString().equals(str)) {
return ConstantImpl.create(rv.toString());
}
} else if (expr instanceof Operation<?>) {
Operation<?> o = (Operation<?>) expr;
if (o.getOperator() == Ops.CONCAT) {
Expression<String> lhs = regexToLike((Expression<String>) o.getArg(0));
Expression<String> rhs = regexToLike((Expression<String>) o.getArg(1));
if (lhs != o.getArg(0) || rhs != o.getArg(1)) {
return operation(String.class, Ops.CONCAT, lhs, rhs);
}
}
}
return expr;
} | 3.68 |
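For constant inputs the conversion above rewrites ".*" to "%" and a lone "." to "_", and rejects constructs such as character classes. A hand-rolled, string-level illustration of just that constant branch (simplified: it drops the return-original-expression behaviour and the escaped-character-class checks of the real method):

```java
public class RegexToLikeDemo {
    // Simplified version of the constant-string branch: ".*" -> "%", "." -> "_"
    static String regexToLike(String regex) {
        StringBuilder out = new StringBuilder(regex.length() + 2);
        boolean escape = false;
        for (int i = 0; i < regex.length(); i++) {
            char ch = regex.charAt(i);
            if (!escape && ch == '.') {
                if (i < regex.length() - 1 && regex.charAt(i + 1) == '*') {
                    out.append('%');
                    i++;
                } else {
                    out.append('_');
                }
                continue;
            } else if (!escape && ch == '\\') {
                escape = true;
                continue;
            } else if (!escape && (ch == '[' || ch == ']' || ch == '^' || ch == '*')) {
                throw new IllegalArgumentException("'" + regex + "' can't be converted to like form");
            }
            out.append(ch);
            escape = false;
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(regexToLike("hello.*world")); // hello%world
        System.out.println(regexToLike("gr.y"));         // gr_y
        System.out.println(regexToLike("a\\.b"));        // a.b  (escaped dot stays literal)
    }
}
```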
hudi_BaseClusterer_updateWriteClient | /**
* Update the write client used by async clustering.
* @param writeClient
*/
public void updateWriteClient(BaseHoodieWriteClient<T, I, K, O> writeClient) {
this.clusteringClient = writeClient;
} | 3.68 |
hudi_HoodieTableMetaClient_getActiveTimeline | /**
* Get the active instants as a timeline.
*
* @return Active instants timeline
*/
public synchronized HoodieActiveTimeline getActiveTimeline() {
if (activeTimeline == null) {
activeTimeline = new HoodieActiveTimeline(this);
}
return activeTimeline;
} | 3.68 |
flink_TGetQueryIdResp_findByThriftIdOrThrow | /**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new java.lang.IllegalArgumentException(
"Field " + fieldId + " doesn't exist!");
return fields;
} | 3.68 |
hadoop_AbstractTask_setTaskCmd | /**
* Set TaskCmd for a Task.
* @param taskCMD : Task command line
*/
@Override
public final void setTaskCmd(final String taskCMD) {
this.taskCmd = taskCMD;
} | 3.68 |
flink_StatusWatermarkValve_inputWatermark | /**
* Feed a {@link Watermark} into the valve. If the input triggers the valve to output a new
* Watermark, {@link DataOutput#emitWatermark(Watermark)} will be called to process the new
* Watermark.
*
* @param watermark the watermark to feed to the valve
* @param channelIndex the index of the channel that the fed watermark belongs to (index
* starting from 0)
*/
public void inputWatermark(Watermark watermark, int channelIndex, DataOutput<?> output)
throws Exception {
// ignore the input watermark if its input channel, or all input channels are idle (i.e.
// overall the valve is idle).
if (lastOutputWatermarkStatus.isActive()
&& channelStatuses[channelIndex].watermarkStatus.isActive()) {
long watermarkMillis = watermark.getTimestamp();
// if the input watermark's value is less than the last received watermark for its input
// channel, ignore it also.
if (watermarkMillis > channelStatuses[channelIndex].watermark) {
channelStatuses[channelIndex].watermark = watermarkMillis;
if (channelStatuses[channelIndex].isWatermarkAligned) {
adjustAlignedChannelStatuses(channelStatuses[channelIndex]);
} else if (watermarkMillis >= lastOutputWatermark) {
// previously unaligned input channels are now aligned if its watermark has
// caught up
markWatermarkAligned(channelStatuses[channelIndex]);
}
// now, attempt to find a new min watermark across all aligned channels
findAndOutputNewMinWatermarkAcrossAlignedChannels(output);
}
}
} | 3.68 |
flink_NumericSummaryAggregator_aggregate | /** Add a value to the current aggregation. */
@Override
public void aggregate(T value) {
if (value == null) {
nullCount++;
} else if (isNan(value)) {
nanCount++;
} else if (isInfinite(value)) {
infinityCount++;
} else {
nonMissingCount++;
min.aggregate(value);
max.aggregate(value);
sum.aggregate(value);
double doubleValue = value.doubleValue();
double delta = doubleValue - mean.value();
mean = mean.add(delta / nonMissingCount);
m2 = m2.add(delta * (doubleValue - mean.value()));
}
} | 3.68 |
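The mean/m2 updates in the non-missing branch are Welford's online algorithm: delta = x - mean; mean += delta / n; m2 += delta * (x - mean_new), from which the sample variance is m2 / (n - 1). A minimal standalone version over plain doubles (assumption: no NaN/infinity handling, unlike the Flink aggregator):

```java
public class WelfordDemo {
    private long count;
    private double mean;
    private double m2;

    // Welford's online update: numerically stable running mean and variance
    void add(double x) {
        count++;
        double delta = x - mean;
        mean += delta / count;
        m2 += delta * (x - mean); // uses the *updated* mean, as in the aggregator above
    }

    double mean() { return mean; }

    double sampleVariance() { return count > 1 ? m2 / (count - 1) : Double.NaN; }

    public static void main(String[] args) {
        WelfordDemo w = new WelfordDemo();
        for (double x : new double[]{2, 4, 4, 4, 5, 5, 7, 9}) {
            w.add(x);
        }
        System.out.println(w.mean());           // ≈ 5.0
        System.out.println(w.sampleVariance()); // ≈ 4.571 (sum of squared deviations 32 over n-1 = 7)
    }
}
```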
hbase_OffPeakHours_getInstance | /**
* @param startHour inclusive
* @param endHour exclusive
*/
public static OffPeakHours getInstance(int startHour, int endHour) {
if (startHour == -1 && endHour == -1) {
return DISABLED;
}
if (!isValidHour(startHour) || !isValidHour(endHour)) {
if (LOG.isWarnEnabled()) {
LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + startHour + " end = "
+ endHour + ". Valid numbers are [0-23]");
}
return DISABLED;
}
if (startHour == endHour) {
return DISABLED;
}
return new OffPeakHoursImpl(startHour, endHour);
} | 3.68 |
hbase_FavoredStochasticBalancer_roundRobinAssignment | /**
* Round robin assignment: Segregate the regions into two types: 1. The regions that have favored
* node assignment where at least one of the favored nodes is still alive. In this case, try to
* adhere to the current favored nodes assignment as much as possible - i.e., if the current
* primary is gone, then make the secondary or tertiary as the new host for the region (based on
* their current load). Note that we don't change the favored node assignments here (even though
* one or more favored node is currently down). That will be done by the admin operations. 2. The
* regions that currently don't have favored node assignments. Generate favored nodes for them and
* then assign. Generate the primary fn in round robin fashion and generate secondary and tertiary
* as per favored nodes constraints.
*/
@Override
@NonNull
public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
List<ServerName> servers) throws HBaseIOException {
metricsBalancer.incrMiscInvocations();
Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
if (regions.isEmpty()) {
return assignmentMap;
}
Set<RegionInfo> regionSet = new HashSet<>(regions);
try {
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
helper.initialize();
Set<RegionInfo> systemRegions = FavoredNodesManager.filterNonFNApplicableRegions(regionSet);
regionSet.removeAll(systemRegions);
// Assign all system regions
Map<ServerName, List<RegionInfo>> systemAssignments =
super.roundRobinAssignment(Lists.newArrayList(systemRegions), servers);
// Segregate favored and non-favored nodes regions and assign accordingly.
Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>> segregatedRegions =
segregateRegionsAndAssignRegionsWithFavoredNodes(regionSet, servers);
Map<ServerName, List<RegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
Map<ServerName, List<RegionInfo>> regionsWithoutFN =
generateFNForRegionsWithoutFN(helper, segregatedRegions.getSecond());
// merge the assignment maps
mergeAssignmentMaps(assignmentMap, systemAssignments);
mergeAssignmentMaps(assignmentMap, regionsWithFavoredNodesMap);
mergeAssignmentMaps(assignmentMap, regionsWithoutFN);
} catch (Exception ex) {
throw new HBaseIOException("Encountered exception while doing favored-nodes assignment " + ex
+ " Falling back to regular assignment", ex);
}
return assignmentMap;
} | 3.68 |