name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_PlacementConstraints_allocationTag | /**
* Constructs a target expression on an allocation tag. It is satisfied if
* there are allocations with one of the given tags. The default namespace
* for these tags is {@link AllocationTagNamespaceType#SELF}; this only
* checks tags within the application.
*
* @param allocationTags the set of tags that the attribute should take
* values from
* @return the resulting expression on the allocation tags
*/
public static TargetExpression allocationTag(String... allocationTags) {
return allocationTagWithNamespace(
AllocationTagNamespaceType.SELF.toString(), allocationTags);
} | 3.68 |
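
A brief usage sketch for context (not from the source): only allocationTag itself appears above; the tag name, the companion targetNotIn/NODE helpers, and the final build() call are assumptions based on the surrounding PlacementConstraints class.

```java
// Hedged sketch: "do not place this container on a node that already runs a
// container of this application tagged 'hbase-rs'". Helper names other than
// allocationTag are assumed from the same PlacementConstraints class.
TargetExpression sameAppRegionServers =
    PlacementConstraints.PlacementTargets.allocationTag("hbase-rs");
PlacementConstraint antiAffinity =
    PlacementConstraints.targetNotIn(PlacementConstraints.NODE, sameAppRegionServers).build();
```
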
hbase_HMaster_getLoadBalancerClassName | /**
* Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
* <p/>
* Notice that the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so
* this method will return the balancer used inside each rs group.
* @return The name of the {@link LoadBalancer} in use.
*/
public String getLoadBalancerClassName() {
return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
} | 3.68 |
dubbo_ConfigurableSourceBeanMetadataElement_setSource | /**
* Set the source into the specified {@link BeanMetadataElement}
*
* @param beanMetadataElement {@link BeanMetadataElement} instance
*/
default void setSource(BeanMetadataElement beanMetadataElement) {
if (beanMetadataElement instanceof BeanMetadataAttributeAccessor) {
((BeanMetadataAttributeAccessor) beanMetadataElement).setSource(this);
}
} | 3.68 |
morf_Upgrade_resultSetProcessor | /**
* Returns a ResultSetProcessor that processes a ResultSet into a Map<String, String>.
*
* @return A ResultSetProcessor that converts a ResultSet into a Map<String, String>.
*/
private ResultSetProcessor<Map<String, String>> resultSetProcessor() {
return resultSet -> {
Map<String, String> upgradeAudit = new HashMap<>();
try {
while(resultSet.next()) {
String description = resultSet.getString("description");
String upgradeUUID = resultSet.getString("upgradeUUID");
upgradeAudit.put(description, upgradeUUID);
}
} catch (SQLException e) {
e.printStackTrace();
}
return upgradeAudit;
};
} | 3.68 |
morf_Escaping_isCharValidForXml | /**
* https://www.w3.org/TR/xml/#NT-Char
*/
static boolean isCharValidForXml(int c) {
return
c >= 0x20 && c <= 0xd7ff
||
c >= 0xe000 && c <= 0xfffd
||
c == 0x9 || c == 0xd || c == 0xa;
} | 3.68 |
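
As a concrete illustration of the check above, here is a minimal standalone sketch (plain Java, not part of morf) that strips characters the W3C production disallows; the extra 0x10000-0x10FFFF range covers supplementary code points from the same specification.

```java
// Standalone sketch: drop code points not allowed by
// https://www.w3.org/TR/xml/#NT-Char, mirroring the ranges tested above and
// additionally admitting supplementary code points (0x10000-0x10FFFF).
static String stripInvalidXmlChars(String input) {
    StringBuilder sb = new StringBuilder(input.length());
    input.codePoints()
        .filter(c -> c == 0x9 || c == 0xA || c == 0xD
            || (c >= 0x20 && c <= 0xD7FF)
            || (c >= 0xE000 && c <= 0xFFFD)
            || (c >= 0x10000 && c <= 0x10FFFF))
        .forEach(sb::appendCodePoint);
    return sb.toString();
}
```
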
framework_VLoadingIndicator_getSecondDelay | /**
* Returns the delay (in ms) which must pass before the loading indicator
* moves to its "second" state.
*
* @return The delay (in ms) until the loading indicator moves into its
* "second" state. Counted from when {@link #trigger()} is called.
*/
public int getSecondDelay() {
return secondDelay;
} | 3.68 |
hadoop_EchoUserResolver_needsTargetUsersList | /**
* {@inheritDoc}
* <br><br>
* Since {@link EchoUserResolver} simply returns the user's name passed as
* the argument, it doesn't need a target list of users.
*/
public boolean needsTargetUsersList() {
return false;
} | 3.68 |
flink_FutureUtils_delay | /**
* Delay the given action by the given delay.
*
* @param runnable to execute after the given delay
* @param delay after which to execute the runnable
* @param timeUnit time unit of the delay
* @return Future of the scheduled action
*/
private static ScheduledFuture<?> delay(Runnable runnable, long delay, TimeUnit timeUnit) {
checkNotNull(runnable);
checkNotNull(timeUnit);
return DELAYER.schedule(runnable, delay, timeUnit);
} | 3.68 |
dubbo_AbstractAnnotationBeanPostProcessor_findFieldAnnotationMetadata | /**
* Finds {@link InjectionMetadata.InjectedElement} Metadata from annotated fields
*
* @param beanClass The {@link Class} of Bean
* @return non-null {@link List}
*/
private List<AbstractAnnotationBeanPostProcessor.AnnotatedFieldElement> findFieldAnnotationMetadata(
final Class<?> beanClass) {
final List<AbstractAnnotationBeanPostProcessor.AnnotatedFieldElement> elements = new LinkedList<>();
ReflectionUtils.doWithFields(beanClass, field -> {
for (Class<? extends Annotation> annotationType : getAnnotationTypes()) {
AnnotationAttributes attributes =
AnnotationUtils.getAnnotationAttributes(field, annotationType, getEnvironment(), true, true);
if (attributes != null) {
if (Modifier.isStatic(field.getModifiers())) {
if (logger.isWarnEnabled()) {
logger.warn(
CONFIG_DUBBO_BEAN_INITIALIZER,
"",
"",
"@" + annotationType.getName() + " is not supported on static fields: " + field);
}
return;
}
elements.add(new AnnotatedFieldElement(field, attributes));
}
}
});
return elements;
} | 3.68 |
hbase_ZKUtil_deleteNodeFailSilent | /** Returns a deleteNodeFailSilent ZKUtilOP */
public static ZKUtilOp deleteNodeFailSilent(String path) {
return new DeleteNodeFailSilent(path);
} | 3.68 |
AreaShop_RentRegion_getRenter | /**
* Get the UUID of the player renting the region.
* @return The UUID of the renter
*/
public UUID getRenter() {
String renter = config.getString("rent.renter");
if(renter != null) {
try {
return UUID.fromString(renter);
} catch(IllegalArgumentException e) {
// Incorrect UUID
}
}
return null;
} | 3.68 |
framework_Form_setTabIndex | /**
* Sets the Tabulator index of this Focusable component.
*
* @see Component.Focusable#setTabIndex(int)
*/
@Override
public void setTabIndex(int tabIndex) {
super.setTabIndex(tabIndex);
for (final Object id : getItemPropertyIds()) {
getField(id).setTabIndex(tabIndex);
}
} | 3.68 |
flink_TableChange_modifyPhysicalColumnType | /**
* A table change that modifies the physical column data type.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <new_column_type>
* </pre>
*
* @param oldColumn the definition of the old column.
* @param newType the type of the new column.
* @return a TableChange represents the modification.
*/
static ModifyPhysicalColumnType modifyPhysicalColumnType(Column oldColumn, DataType newType) {
return new ModifyPhysicalColumnType(oldColumn, newType);
} | 3.68 |
flink_ExecNodeConfig_shouldSetUid | /** @return Whether transformations should set a UID. */
public boolean shouldSetUid() {
final UidGeneration uidGeneration = get(ExecutionConfigOptions.TABLE_EXEC_UID_GENERATION);
switch (uidGeneration) {
case PLAN_ONLY:
return isCompiled
&& !get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_TRANSFORMATION_UIDS);
case ALWAYS:
return true;
case DISABLED:
return false;
default:
throw new IllegalArgumentException(
"Unknown UID generation strategy: " + uidGeneration);
}
} | 3.68 |
hadoop_MultipleOutputFormat_getInputFileBasedOutputFileName | /**
* Generate the outfile name based on a given name and the input file name. If
* {@link JobContext#MAP_INPUT_FILE} does not exist (i.e. this is not a map-only job),
* the given name is returned unchanged. If the config value for
* "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value, the given
* name is returned unchanged. Otherwise, return a file name consisting of the
* N trailing legs of the input file name where N is the config value for
* "num.of.trailing.legs.to.use".
*
* @param job
* the job config
* @param name
* the output file name
* @return the outfile name based on a given name and the input file name.
*/
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
String infilepath = job.get(MRJobConfig.MAP_INPUT_FILE);
if (infilepath == null) {
// if the {@link JobContext#MAP_INPUT_FILE} does not exist,
// then return the given name
return name;
}
int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
if (numOfTrailingLegsToUse <= 0) {
return name;
}
Path infile = new Path(infilepath);
Path parent = infile.getParent();
String midName = infile.getName();
Path outPath = new Path(midName);
for (int i = 1; i < numOfTrailingLegsToUse; i++) {
if (parent == null) break;
midName = parent.getName();
if (midName.length() == 0) break;
parent = parent.getParent();
outPath = new Path(midName, outPath);
}
return outPath.toString();
} | 3.68 |
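
To make the "trailing legs" naming concrete, here is a small illustrative sketch using java.nio paths instead of Hadoop's Path; the helper name and sample path are hypothetical.

```java
// Illustrative only (java.nio, not Hadoop's Path): keep the last n path
// segments ("trailing legs") of the input file, mirroring the loop above.
// Assumes n >= 1, as guaranteed by the guarded caller.
// e.g. trailingLegs("/data/2023/01/part-00000", 3) -> "2023/01/part-00000"
static String trailingLegs(String inputFile, int n) {
    java.nio.file.Path p = java.nio.file.Paths.get(inputFile);
    int count = p.getNameCount();
    return p.subpath(Math.max(0, count - n), count).toString();
}
```
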
graphhopper_TarjanSCC_getBiggestComponent | /**
* A reference to the biggest component contained in {@link #getComponents()} or an empty list if there are
* either no components or the biggest component has only a single node (and hence {@link #getComponents()} is
* empty).
*/
public IntArrayList getBiggestComponent() {
return biggestComponent;
} | 3.68 |
flink_CastRuleProvider_create | /**
* Create a {@link CastExecutor} for the provided input type and target type. Returns {@code
* null} if no rule can be resolved.
*
* @see CastRule#create(CastRule.Context, LogicalType, LogicalType)
*/
public static @Nullable CastExecutor<?, ?> create(
CastRule.Context context, LogicalType inputLogicalType, LogicalType targetLogicalType) {
CastRule<?, ?> rule = INSTANCE.internalResolve(inputLogicalType, targetLogicalType);
if (rule == null) {
return null;
}
return rule.create(context, inputLogicalType, targetLogicalType);
} | 3.68 |
flink_TypeExtractor_getForClass | /**
* Creates type information from a given Class such as Integer, String[] or POJOs.
*
* <p>This method does not support ParameterizedTypes such as Tuples or complex type
* hierarchies. In most cases {@link TypeExtractor#createTypeInfo(Type)} is the recommended
* method for type extraction (a Class is a child of Type).
*
* @param clazz a Class to create TypeInformation for
* @return TypeInformation that describes the passed Class
*/
public static <X> TypeInformation<X> getForClass(Class<X> clazz) {
final List<Type> typeHierarchy = new ArrayList<>();
typeHierarchy.add(clazz);
return new TypeExtractor().privateGetForClass(clazz, typeHierarchy);
} | 3.68 |
flink_SkipListUtils_isNodeRemoved | /**
* Whether the node has been logically removed.
*
* @param node the node to check against
* @param spaceAllocator the space allocator
* @return true if the node has been logically removed.
*/
static boolean isNodeRemoved(long node, Allocator spaceAllocator) {
if (node == NIL_NODE) {
return false;
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return getNodeStatus(segment, offsetInByteBuffer) == NodeStatus.REMOVE;
} | 3.68 |
hadoop_ContentCounts_getStoragespace | // Get the total of storage space usage in bytes including replication.
public long getStoragespace() {
return contents.get(Content.DISKSPACE);
} | 3.68 |
hadoop_HeaderProcessing_listXAttrs | /**
* See {@code FileSystem.listXAttrs(path)}.
* @param path Path to get extended attributes
* @return List of supported XAttrs
* @throws IOException IO failure
*/
public List<String> listXAttrs(final Path path) throws IOException {
return new ArrayList<>(retrieveHeaders(path, INVOCATION_OP_XATTR_LIST)
.keySet());
} | 3.68 |
dubbo_ProtocolBuilder_path | /**
* @param path
* @return ProtocolBuilder
* @see ProtocolBuilder#contextpath(String)
*/
@Deprecated
public ProtocolBuilder path(String path) {
this.contextpath = path;
return getThis();
} | 3.68 |
framework_TreeGrid_scrollTo | /**
* This method is inherited from Grid but should never be called directly
* with a TreeGrid.
*/
@Deprecated
@Override
public void scrollTo(int row, ScrollDestination destination) {
super.scrollTo(row, destination);
} | 3.68 |
morf_SqlDialect_nextIdValue | /**
* Creates a field reference to provide id column values.
*
* @param sourceTable the source table.
* @param sourceReference a reference lookup to add the ID to.
* @param autoNumberTable the name of the table to query over.
* @param nameColumn the name of the column holding the Autonumber name.
* @param valueColumn the name of the column holding the Autonumber value.
* @return a field reference.
*/
public AliasedField nextIdValue(TableReference sourceTable, TableReference sourceReference, Table autoNumberTable, String nameColumn, String valueColumn) {
String autoNumberName = getAutoNumberName(sourceTable.getName());
if (sourceReference == null) {
return new FieldFromSelect(new SelectStatement(Function.coalesce(new FieldReference(valueColumn), new FieldLiteral(1))).from(
new TableReference(autoNumberTable.getName(), autoNumberTable.isTemporary())).where(
new Criterion(Operator.EQ, new FieldReference(nameColumn), autoNumberName)));
} else {
return new MathsField(new FieldFromSelect(new SelectStatement(Function.coalesce(new FieldReference(valueColumn),
new FieldLiteral(0))).from(new TableReference(autoNumberTable.getName(), autoNumberTable.isTemporary())).where(
new Criterion(Operator.EQ, new FieldReference(nameColumn), autoNumberName))), MathsOperator.PLUS, new FieldReference(
sourceReference, "id"));
}
} | 3.68 |
flink_ResourceProfile_getCpuCores | /**
* Get the cpu cores needed.
*
* @return The cpu cores, 1.0 means a full cpu thread
*/
public CPUResource getCpuCores() {
throwUnsupportedOperationExceptionIfUnknown();
return cpuCores;
} | 3.68 |
framework_Form_setVisibleItemProperties | /**
* Sets the visibleProperties.
*
* @param visibleProperties
* the visibleProperties to set.
*/
public void setVisibleItemProperties(Object... visibleProperties) {
LinkedList<Object> v = new LinkedList<Object>();
for (int i = 0; i < visibleProperties.length; i++) {
v.add(visibleProperties[i]);
}
setVisibleItemProperties(v);
} | 3.68 |
hmily_MongodbTemplateService_update | /**
* update.
* @param c type.
* @param conditions where conditions.
* @param newData set parameters.
* @return line count.
*/
public int update(final Class c, final Criteria conditions, final Pair<String, Object>... newData) {
Update update = new Update();
for (Pair<String, Object> p:newData) {
update.set(p.getKey(), p.getValue());
}
return (int) updateFirst(new Query().addCriteria(conditions), update, c).getModifiedCount();
} | 3.68 |
querydsl_GenericExporter_setTypeMappingsClass | /**
* Set the type mappings class to be used.
*
* @param typeMappingsClass
*/
public void setTypeMappingsClass(Class<? extends TypeMappings> typeMappingsClass) {
codegenModule.bind(TypeMappings.class, typeMappingsClass);
} | 3.68 |
hbase_StoreUtils_getFileSplitPoint | /**
* Gets the approximate mid-point of the given file that is optimal for use in splitting it.
* @param file the store file
* @param comparator Comparator used to compare KVs.
* @return The split point row, or null if splitting is not possible, or reader is null.
*/
static Optional<byte[]> getFileSplitPoint(HStoreFile file, CellComparator comparator)
throws IOException {
StoreFileReader reader = file.getReader();
if (reader == null) {
LOG.warn("Storefile " + file + " Reader is null; cannot get split point");
return Optional.empty();
}
// Get first, last, and mid keys. Midkey is the key that starts block
// in middle of hfile. Has column and timestamp. Need to return just
// the row we want to split on as midkey.
Optional<Cell> optionalMidKey = reader.midKey();
if (!optionalMidKey.isPresent()) {
return Optional.empty();
}
Cell midKey = optionalMidKey.get();
Cell firstKey = reader.getFirstKey().get();
Cell lastKey = reader.getLastKey().get();
// if the midkey is the same as the first or last keys, we cannot (ever) split this region.
if (
comparator.compareRows(midKey, firstKey) == 0 || comparator.compareRows(midKey, lastKey) == 0
) {
if (LOG.isDebugEnabled()) {
LOG.debug("cannot split {} because midkey is the same as first or last row", file);
}
return Optional.empty();
}
return Optional.of(CellUtil.cloneRow(midKey));
} | 3.68 |
framework_DesignAttributeHandler_assignValue | /**
* Assigns the specified design attribute to the given component.
*
* @param target
* the target to which the attribute should be set
* @param attribute
* the name of the attribute to be set
* @param value
* the string value of the attribute
* @return true on success
*/
public static boolean assignValue(Object target, String attribute,
String value) {
if (target == null || attribute == null || value == null) {
throw new IllegalArgumentException(
"Parameters with null value not allowed");
}
boolean success = false;
try {
Method setter = findSetterForAttribute(target.getClass(),
attribute);
if (setter == null) {
// if we don't have the setter, there is no point in continuing
success = false;
} else {
// we have a value from design attributes, let's use that
Type[] types = GenericTypeReflector
.getExactParameterTypes(setter, target.getClass());
Object param = getFormatter().parse(value, (Class<?>) types[0]);
setter.invoke(target, param);
success = true;
}
} catch (Exception e) {
getLogger().log(Level.WARNING, "Failed to set value \"" + value
+ "\" to attribute " + attribute, e);
}
if (!success) {
getLogger().info("property " + attribute
+ " ignored by default attribute handler");
}
return success;
} | 3.68 |
hadoop_AbstractRMAdminRequestInterceptor_init | /**
* Initializes the {@link RMAdminRequestInterceptor}.
*/
@Override
public void init(String userName) {
this.user = RouterServerUtil.setupUser(userName);
if (this.nextInterceptor != null) {
this.nextInterceptor.init(userName);
}
} | 3.68 |
hadoop_FsLinkResolution_resolve | /**
* Apply the given function to the resolved path under the supplied
* FileContext.
* @param fileContext file context to resolve under
* @param path path to resolve
* @param fn function to invoke
* @param <T> return type.
* @return the return value of the function as invoked against the resolved
* path.
* @throws UnresolvedLinkException link resolution failure
* @throws IOException other IO failure.
*/
public static <T> T resolve(
final FileContext fileContext, final Path path,
final FsLinkResolutionFunction<T> fn)
throws UnresolvedLinkException, IOException {
return new FsLinkResolution<>(fn).resolve(fileContext, path);
} | 3.68 |
zxing_ECIEncoderSet_getPriorityEncoderIndex | /*
* returns -1 if no priority charset was defined
*/
public int getPriorityEncoderIndex() {
return priorityEncoderIndex;
} | 3.68 |
morf_DatabaseDataSetConsumer_table | /**
* @see org.alfasoftware.morf.dataset.DataSetConsumer#table(org.alfasoftware.morf.metadata.Table, java.lang.Iterable)
*/
@Override
public void table(Table table, Iterable<Record> records) {
TableLoader.builder()
.withConnection(connection)
.withSqlScriptExecutor(sqlExecutor)
.withDialect(sqlDialect)
.explicitCommit(true)
.truncateBeforeLoad()
.insertingWithPresetAutonums()
.forTable(table)
.load(records);
} | 3.68 |
framework_AbstractClickEventHandler_registerHandler | /**
* Registers the given handler to the widget so that the necessary events
* are passed to this {@link ClickEventHandler}.
* <p>
* By default registers the handler with the connector root widget.
* </p>
*
* @param <H>
* @param handler
* The handler to register
* @param type
* The type of the handler.
* @return A reference for the registration of the handler.
*/
protected <H extends EventHandler> HandlerRegistration registerHandler(
final H handler, DomEvent.Type<H> type) {
return connector.getWidget().addDomHandler(handler, type);
} | 3.68 |
framework_LegacyApplication_addWindow | /**
* Adds a new browser level window to this application.
*
* @param uI
* the UI window to add to the application
*/
public void addWindow(LegacyWindow uI) {
if (uI.getName() == null) {
String name = Integer.toString(namelessUIIndex++);
uI.setName(name);
}
uI.setApplication(this);
legacyUINames.put(uI.getName(), uI);
uI.setSession(VaadinSession.getCurrent());
} | 3.68 |
flink_FileInputFormat_getFilePaths | /**
* Returns the paths of all files to be read by the FileInputFormat.
*
* @return The list of all paths to read.
*/
public Path[] getFilePaths() {
if (supportsMultiPaths()) {
if (this.filePaths == null) {
return new Path[0];
}
return this.filePaths;
} else {
if (this.filePath == null) {
return new Path[0];
}
return new Path[] {filePath};
}
} | 3.68 |
hbase_CellBlockBuilder_createCellScannerReusingBuffers | /**
* Create a cell scanner using an existing bytebuff.
* @param codec to use for cellblock
* @param cellBlock ByteBuffer containing the cells written by the Codec. The buffer should be
* position()'ed at the start of the cell block and limit()'ed at the end.
* @return CellScanner to work against the content of <code>cellBlock</code>. All cells created
* out of the CellScanner will share the same ByteBuffer being passed.
* @throws IOException if cell encoding fails
*/
public CellScanner createCellScannerReusingBuffers(final Codec codec,
final CompressionCodec compressor, ByteBuff cellBlock) throws IOException {
// Use this method from HRS to create the CellScanner
// If compressed, decompress it first before passing it on else we will leak compression
// resources if the stream is not closed properly after we let it out.
if (compressor != null) {
cellBlock = decompress(compressor, cellBlock);
}
return codec.getDecoder(cellBlock);
} | 3.68 |
hbase_IpcClientSpanBuilder_buildSpanName | /**
* Construct an RPC span name.
*/
public static String buildSpanName(final String packageAndService, final String method) {
return packageAndService + "/" + method;
} | 3.68 |
pulsar_KerberosName_setConfiguration | /**
* Set the static configuration to get the rules.
*
* @throws IOException
*/
public static void setConfiguration() throws IOException {
String ruleString = System.getProperty("zookeeper.security.auth_to_local", "DEFAULT");
rules = parseRules(ruleString);
} | 3.68 |
hbase_Append_getTimeRange | /**
* Gets the TimeRange used for this append.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.68 |
hadoop_SQLDelegationTokenSecretManager_cancelToken | /**
* Cancels a token by removing it from the SQL database. This will
* call the corresponding method in {@link AbstractDelegationTokenSecretManager}
* to perform validation and remove the token from the cache.
* @return Identifier of the canceled token
*/
@Override
public synchronized TokenIdent cancelToken(Token<TokenIdent> token,
String canceller) throws IOException {
TokenIdent id = createTokenIdent(token.getIdentifier());
// Calling getTokenInfo to load token into local cache if not present.
// super.cancelToken() requires token to be present in local cache.
getTokenInfo(id);
return super.cancelToken(token, canceller);
} | 3.68 |
hbase_RegionMover_load | /**
* Loads the specified {@link #hostname} with regions listed in the {@link #filename}. The RegionMover
* object has to be created using {@link #RegionMover(RegionMoverBuilder)}.
* @return true if loading succeeded, false otherwise
*/
public boolean load() throws ExecutionException, InterruptedException, TimeoutException {
ExecutorService loadPool = Executors.newFixedThreadPool(1);
Future<Boolean> loadTask = loadPool.submit(getMetaRegionMovePlan());
boolean isMetaMoved = waitTaskToFinish(loadPool, loadTask, "loading");
if (!isMetaMoved) {
return false;
}
loadPool = Executors.newFixedThreadPool(1);
loadTask = loadPool.submit(getNonMetaRegionsMovePlan());
return waitTaskToFinish(loadPool, loadTask, "loading");
} | 3.68 |
hadoop_CopyCommandWithMultiThread_setThreadCount | /**
* Set the thread count from the option value; if the value is less than 1,
* use 1 instead.
*
* @param optValue option value
*/
protected void setThreadCount(String optValue) {
if (optValue != null) {
threadCount = Math.max(Integer.parseInt(optValue), 1);
}
} | 3.68 |
hbase_DictionaryCache_getDictionary | /**
* Load a dictionary or return a previously cached load.
* @param conf configuration
* @param path the hadoop Path where the dictionary is located, as a String
* @return the dictionary bytes if successful, null otherwise
*/
public static byte[] getDictionary(final Configuration conf, final String path)
throws IOException {
if (path == null || path.isEmpty()) {
return null;
}
// Create the dictionary loading cache if we haven't already
if (CACHE == null) {
synchronized (DictionaryCache.class) {
if (CACHE == null) {
final int maxSize = conf.getInt(DICTIONARY_MAX_SIZE_KEY, DEFAULT_DICTIONARY_MAX_SIZE);
CACHE = CacheBuilder.newBuilder().maximumSize(100).expireAfterAccess(10, TimeUnit.MINUTES)
.build(new CacheLoader<String, byte[]>() {
@Override
public byte[] load(String s) throws Exception {
byte[] bytes;
if (path.startsWith(RESOURCE_SCHEME)) {
bytes = loadFromResource(conf, path, maxSize);
} else {
bytes = loadFromHadoopFs(conf, path, maxSize);
}
LOG.info("Loaded dictionary from {} (size {})", s, bytes.length);
return bytes;
}
});
}
}
}
// Get or load the dictionary for the given path
try {
return CACHE.get(path);
} catch (ExecutionException e) {
throw new IOException(e);
}
} | 3.68 |
morf_SelectStatementBuilder_withDialectSpecificHint | /**
* Supplies a specified custom hint and database type to the database for a query.
*
* @param databaseType a database type identifier. Eg: ORACLE, PGSQL, SQL_SERVER
* @param hintContents the hint contents themselves, without the delimiters. Eg: for Oracle hints, without the /*+ prefix and the closing delimiter
* @return this, for method chaining.
*/
public org.alfasoftware.morf.sql.SelectStatementBuilder withDialectSpecificHint(String databaseType, String hintContents) {
DialectSpecificHint dialectSpecificHint = new DialectSpecificHint(databaseType, hintContents);
this.hints.add(dialectSpecificHint);
return this;
} | 3.68 |
hadoop_WebServiceClient_initialize | /**
* Construct a new WebServiceClient based on the configuration. It will try to
* load SSL certificates when it is specified.
*
* @param conf configuration.
* @throws Exception exception occur.
*/
public static void initialize(Configuration conf) throws Exception {
if (instance == null) {
synchronized (WebServiceClient.class) {
if (instance == null) {
isHttps = YarnConfiguration.useHttps(conf);
if (isHttps) {
createSSLFactory(conf);
}
instance = new WebServiceClient();
}
}
}
} | 3.68 |
dubbo_ApolloDynamicConfiguration_getProperties | /**
* It is recommended to specify namespace and group when using Apollo.
* <p>
* <dubbo:config-center namespace="governance" group="dubbo" />, 'namespace=governance' is for governance rules while
* 'group=dubbo' is for properties files.
*
* @param key default value is 'dubbo.properties', currently useless for Apollo.
* @param group
* @param timeout
* @return
* @throws IllegalStateException
*/
@Override
public String getProperties(String key, String group, long timeout) throws IllegalStateException {
if (StringUtils.isEmpty(group)) {
return dubboConfigFile.getContent();
}
if (group.equals(url.getApplication())) {
return ConfigService.getConfigFile(APOLLO_APPLICATION_KEY, ConfigFileFormat.Properties)
.getContent();
}
ConfigFile configFile = ConfigService.getConfigFile(group, ConfigFileFormat.Properties);
if (configFile == null) {
throw new IllegalStateException("There is no namespace named " + group + " in Apollo.");
}
return configFile.getContent();
} | 3.68 |
flink_HiveInspectors_getArgInspectors | /** Get object inspector for each function argument. */
public static ObjectInspector[] getArgInspectors(
HiveShim hiveShim, HiveFunctionArguments arguments) {
ObjectInspector[] inspectors = new ObjectInspector[arguments.size()];
for (int i = 0; i < inspectors.length; i++) {
if (arguments.isLiteral(i)) {
Object constant = arguments.getArg(i);
PrimitiveTypeInfo primitiveTypeInfo =
(PrimitiveTypeInfo)
HiveTypeUtil.toHiveTypeInfo(arguments.getDataType(i), false);
constant =
getConversion(
getObjectInspector(primitiveTypeInfo),
arguments.getDataType(i).getLogicalType(),
hiveShim)
.toHiveObject(constant);
inspectors[i] =
getObjectInspectorForPrimitiveConstant(
primitiveTypeInfo, constant, hiveShim);
} else {
inspectors[i] =
TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(
HiveTypeUtil.toHiveTypeInfo(arguments.getDataType(i), false));
}
}
return inspectors;
} | 3.68 |
hbase_Scan_setRaw | /**
* Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete
* markers and deleted rows that have not been collected yet. This is mostly useful for Scan on
* column families that have KEEP_DELETED_ROWS enabled. It is an error to specify any column when
* "raw" is set.
* @param raw True/False to enable/disable "raw" mode.
*/
public Scan setRaw(boolean raw) {
setAttribute(RAW_ATTR, Bytes.toBytes(raw));
return this;
} | 3.68 |
hadoop_SingleFilePerBlockCache_addToLinkedListHead | /**
* Helper method to add the given entry to the head of the linked list.
*
* @param entry Block entry to add.
*/
private void addToLinkedListHead(Entry entry) {
blocksLock.writeLock().lock();
try {
addToHeadOfLinkedList(entry);
} finally {
blocksLock.writeLock().unlock();
}
} | 3.68 |
dubbo_Configuration_containsKey | /**
* Check if the configuration contains the specified key.
*
* @param key the key whose presence in this configuration is to be tested
* @return {@code true} if the configuration contains a value for this
* key, {@code false} otherwise
*/
default boolean containsKey(String key) {
return !isEmptyValue(getProperty(key));
} | 3.68 |
hbase_HRegion_checkAndMergeCPMutations | // TODO Support Increment/Append operations
private void checkAndMergeCPMutations(final MiniBatchOperationInProgress<Mutation> miniBatchOp,
final List<RowLock> acquiredRowLocks, final long timestamp) throws IOException {
visitBatchOperations(true, nextIndexToProcess + miniBatchOp.size(), (int i) -> {
// we pass (i - firstIndex) below since the call expects a relative index
Mutation[] cpMutations = miniBatchOp.getOperationsFromCoprocessors(i - nextIndexToProcess);
if (cpMutations == null) {
return true;
}
// Else Coprocessor added more Mutations corresponding to the Mutation at this index.
Mutation mutation = getMutation(i);
for (Mutation cpMutation : cpMutations) {
this.checkAndPrepareMutation(cpMutation, timestamp);
// Acquire row locks. If not, the whole batch will fail.
acquiredRowLocks.add(region.getRowLock(cpMutation.getRow(), true, null));
// Returned mutations from coprocessor correspond to the Mutation at index i. We can
// directly add the cells from those mutations to the familyMaps of this mutation.
Map<byte[], List<Cell>> cpFamilyMap = cpMutation.getFamilyCellMap();
region.rewriteCellTags(cpFamilyMap, mutation);
// will get added to the memStore later
mergeFamilyMaps(familyCellMaps[i], cpFamilyMap);
// The durability of returned mutation is replaced by the corresponding mutation.
// If the corresponding mutation contains the SKIP_WAL, we shouldn't count the
// cells of returned mutation.
if (region.getEffectiveDurability(mutation.getDurability()) != Durability.SKIP_WAL) {
for (List<Cell> cells : cpFamilyMap.values()) {
miniBatchOp.addCellCount(cells.size());
}
}
}
return true;
});
} | 3.68 |
hadoop_CSQueueStore_getByFullName | /**
* Returns a queue by looking it up by its fully qualified name.
* @param fullName The full name/path of the queue
* @return The queue or null if none found
*/
CSQueue getByFullName(String fullName) {
if (fullName == null) {
return null;
}
try {
modificationLock.readLock().lock();
return fullNameQueues.getOrDefault(fullName, null);
} finally {
modificationLock.readLock().unlock();
}
} | 3.68 |
hadoop_JavaCommandLineBuilder_enableJavaAssertions | /**
* Turn Java assertions on
*/
public void enableJavaAssertions() {
add("-ea");
add("-esa");
} | 3.68 |
hudi_InternalSchemaChangeApplier_applyDeleteChange | /**
* Delete columns from the table.
*
* @param colNames names of the columns to be deleted. To delete a column inside a nested field, the full name must be specified
*/
public InternalSchema applyDeleteChange(String... colNames) {
TableChanges.ColumnDeleteChange delete = TableChanges.ColumnDeleteChange.get(latestSchema);
Arrays.stream(colNames).forEach(colName -> delete.deleteColumn(colName));
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, delete);
} | 3.68 |
hbase_ColumnValueFilter_compareValue | /**
* This method is used to determine whether a cell should be included or filtered out.
* @param op one of operators {@link CompareOperator}
* @param comparator comparator used to compare cells.
* @param cell cell to be compared.
* @return true means cell should be filtered out, included otherwise.
*/
private boolean compareValue(final CompareOperator op, final ByteArrayComparable comparator,
final Cell cell) {
if (op == CompareOperator.NO_OP) {
return true;
}
int compareResult = PrivateCellUtil.compareValue(cell, comparator);
return CompareFilter.compare(op, compareResult);
} | 3.68 |
hbase_TableNamespaceManager_doesNamespaceExist | /**
* Check whether a namespace already exists.
*/
public boolean doesNamespaceExist(String namespaceName) throws IOException {
return cache.containsKey(namespaceName);
} | 3.68 |
framework_DefaultEditorEventHandler_editRow | /**
* Opens the editor over the row with the given index and attempts to focus
* the editor widget in the given column index. If the given indices are
* outside of the existing range, the closest value within the range is
* used.
*
* @param event
* the wrapped DOM event
* @param rowIndex
* index of the row to edit
* @param colIndex
* index of the editor column to focus
*/
protected void editRow(EditorDomEvent<T> event, int rowIndex,
int colIndex) {
int rowCount = event.getGrid().getDataSource().size();
// Limit rowIndex between 0 and rowCount - 1
rowIndex = Math.max(0, Math.min(rowCount - 1, rowIndex));
int colCount = event.getGrid().getVisibleColumns().size();
// Limit colIndex between 0 and colCount - 1
colIndex = Math.max(0, Math.min(colCount - 1, colIndex));
event.getEditor().editRow(rowIndex, colIndex);
} | 3.68 |
querydsl_MetaDataExporter_setExportDirectForeignKeys | /**
* Set whether direct foreign keys should be exported
*
* @param exportDirectForeignKeys
*/
public void setExportDirectForeignKeys(boolean exportDirectForeignKeys) {
this.exportDirectForeignKeys = exportDirectForeignKeys;
} | 3.68 |
morf_ChangePrimaryKeyColumns_reverse | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
return applyChange(schema, newPrimaryKeyColumns, oldPrimaryKeyColumns);
} | 3.68 |
hadoop_CSQueueStore_isAmbiguous | /**
* Checks for name ambiguity: returns true if there are at least two queues
* with the same short name. Queue named "root" is protected, and it will
* always return the root queue regardless of ambiguity.
* @param shortName The short name to be checked for ambiguity
* @return true if there are at least two queues found false otherwise
*/
boolean isAmbiguous(String shortName) {
if (shortName == null) {
return false;
}
boolean ret = true;
try {
modificationLock.readLock().lock();
Set<String> fullNamesSet = this.shortNameToLongNames.get(shortName);
if (fullNamesSet == null || fullNamesSet.size() <= 1) {
ret = false;
}
} finally {
modificationLock.readLock().unlock();
}
return ret;
} | 3.68 |
hbase_KeyValue_getRowLength | /** Returns Row length */
@Override
public short getRowLength() {
return Bytes.toShort(this.bytes, getKeyOffset());
} | 3.68 |
flink_EvictingWindowReader_aggregate | /**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid The uid of the operator.
* @param aggregateFunction The aggregate function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param inputType The type information of the accumulator function.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the values that are aggregated.
* @param <ACC> The type of the accumulator (intermediate aggregate state).
* @param <R> The type of the aggregated result.
* @param <OUT> The output type of the reader function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException If savepoint does not contain the specified uid.
*/
public <K, T, ACC, R, OUT> DataSet<OUT> aggregate(
String uid,
AggregateFunction<T, ACC, R> aggregateFunction,
WindowReaderFunction<R, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> inputType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator =
WindowReaderOperator.evictingWindow(
new AggregateEvictingWindowReaderFunction<>(
readerFunction, aggregateFunction),
keyType,
windowSerializer,
inputType,
env.getConfig());
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
hadoop_TimelineEventSubDoc_equals | // Only check if id is equal
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof TimelineEventSubDoc)) {
return false;
}
TimelineEventSubDoc otherTimelineEvent = (TimelineEventSubDoc) obj;
return this.timelineEvent.getId().equals(otherTimelineEvent.getId());
} | 3.68 |
pulsar_FunctionCommon_getStateNamespace | /**
* Convert pulsar tenant and namespace to state storage namespace.
*
* @param tenant pulsar tenant
* @param namespace pulsar namespace
* @return state storage namespace
*/
public static String getStateNamespace(String tenant, String namespace) {
return String.format("%s_%s", tenant, namespace)
.replace("-", "_");
} | 3.68 |
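
A quick illustration of the mapping (the tenant and namespace values below are made up): the hyphen in the namespace is normalised to an underscore.

```java
// Example with assumed values.
String stateNs = FunctionCommon.getStateNamespace("public", "default-ns");
// stateNs == "public_default_ns"
```
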
hadoop_FederationApplicationHomeSubClusterStoreInputValidator_checkApplicationId | /**
* Validate if the application id is present or not.
*
* @param appId the id of the application to be verified
* @throws FederationStateStoreInvalidInputException if the application Id is
* invalid
*/
private static void checkApplicationId(ApplicationId appId)
throws FederationStateStoreInvalidInputException {
if (appId == null) {
String message = "Missing Application Id."
+ " Please try again by specifying an Application Id.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.68 |
hadoop_RouterPermissionChecker_checkSuperuserPrivilege | /**
* Check the superuser privileges of the current RPC caller. This method is
* based on Datanode#checkSuperuserPrivilege().
* @throws AccessControlException If the user is not authorized.
*/
@Override
public void checkSuperuserPrivilege() throws AccessControlException {
// Try to get the ugi in the RPC call.
UserGroupInformation ugi = null;
try {
ugi = NameNode.getRemoteUser();
} catch (IOException e) {
// Ignore as we catch it afterwards
}
if (ugi == null) {
LOG.error("Cannot get the remote user name");
throw new AccessControlException("Cannot get the remote user name");
}
// Is this by the Router user itself?
if (ugi.getShortUserName().equals(superUser)) {
return;
}
// Is the user a member of the super group?
if (ugi.getGroupsSet().contains(superGroup)) {
return;
}
// Not a superuser
throw new AccessControlException(
ugi.getUserName() + " is not a super user");
} | 3.68 |
pulsar_PackagesStorage_dataPath | /**
* The extra path for saving package data.
*
* For example, we have a package function://public/default/package@v0.1,
* it will save the meta to the path function/public/default/package/v0.1/meta,
* and save the data to the path function/public/default/package/v0.1.
* By default, we are using distributed log as the package storage, and it supports
* saving data at a directory.
* But some storage backends, such as a filesystem, don't have a similar ability and need another path
* for saving the data.
* This api provides the ability to support saving the data in another place.
* If you specify the data path as `/data`, the package will be saved into
* function/public/default/package/v0.1/data.
*
* @return
* the data path
*/
default String dataPath() {
return "";
} | 3.68 |
flink_DataSetUtils_sampleWithSize | /**
* Generate a sample of DataSet which contains fixed size elements.
*
* <p><strong>NOTE:</strong> Sampling with a fixed size is not as efficient as sampling with a fraction;
* use sampling with a fraction unless you need exact precision.
*
* @param withReplacement Whether element can be selected more than once.
* @param numSamples The expected sample size.
* @param seed Random number generator seed.
* @return The sampled DataSet
*/
public static <T> DataSet<T> sampleWithSize(
DataSet<T> input,
final boolean withReplacement,
final int numSamples,
final long seed) {
SampleInPartition<T> sampleInPartition =
new SampleInPartition<>(withReplacement, numSamples, seed);
MapPartitionOperator mapPartitionOperator = input.mapPartition(sampleInPartition);
// There is no previous group, so the parallelism of GroupReduceOperator is always 1.
String callLocation = Utils.getCallLocationName();
SampleInCoordinator<T> sampleInCoordinator =
new SampleInCoordinator<>(withReplacement, numSamples, seed);
return new GroupReduceOperator<>(
mapPartitionOperator, input.getType(), sampleInCoordinator, callLocation);
} | 3.68 |
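
A short usage sketch of the fixed-size sampler (element values and seed chosen arbitrarily); with withReplacement set to false it draws exactly three distinct elements.

```java
// Hedged usage sketch of the method above; values are illustrative.
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Integer> input = env.fromElements(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
DataSet<Integer> sample = DataSetUtils.sampleWithSize(input, false, 3, 42L);
sample.print(); // triggers execution; print() declares a checked Exception
```
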
hadoop_BytesWritable_getLength | /**
* Get the current size of the buffer.
*/
@Override
public int getLength() {
return size;
} | 3.68 |
framework_Tree_getItemStyleGenerator | /**
* @return the current {@link ItemStyleGenerator} for this tree. Null if
* {@link ItemStyleGenerator} is not set.
*/
public ItemStyleGenerator getItemStyleGenerator() {
return itemStyleGenerator;
} | 3.68 |
dubbo_InternalThreadLocal_remove | /**
* Sets the value to uninitialized for the specified thread local map;
* a subsequent call to get() will trigger a call to initialValue().
* The specified thread local map must be for the current thread.
*/
@SuppressWarnings("unchecked")
public final void remove(InternalThreadLocalMap threadLocalMap) {
if (threadLocalMap == null) {
return;
}
Object v = threadLocalMap.removeIndexedVariable(index);
removeFromVariablesToRemove(threadLocalMap, this);
if (v != InternalThreadLocalMap.UNSET) {
try {
onRemoval((V) v);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | 3.68 |
flink_SourceCoordinatorContext_failJob | /**
* Fail the job with the given cause.
*
* @param cause the cause of the job failure.
*/
void failJob(Throwable cause) {
operatorCoordinatorContext.failJob(cause);
} | 3.68 |
hbase_BucketCache_evictBucketEntryIfNoRpcReferenced | /**
* Evict {@link BlockCacheKey} and its corresponding {@link BucketEntry} only if
* {@link BucketEntry#isRpcRef} is false. <br/>
* NOTE: When evicting from {@link BucketCache#backingMap}, only the matched {@link BlockCacheKey} and
* {@link BucketEntry} could be removed.
* @param blockCacheKey {@link BlockCacheKey} to evict.
* @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict.
* @return true to indicate whether we've evicted successfully or not.
*/
boolean evictBucketEntryIfNoRpcReferenced(BlockCacheKey blockCacheKey, BucketEntry bucketEntry) {
if (!bucketEntry.isRpcRef()) {
return doEvictBlock(blockCacheKey, bucketEntry, true);
}
return false;
} | 3.68 |
framework_BasicEvent_isAllDay | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.calendar.event.CalendarEvent#isAllDay()
*/
@Override
public boolean isAllDay() {
return isAllDay;
} | 3.68 |
framework_GridKeyDownEvent_getNativeKeyCode | /**
* Gets the native key code. These key codes are enumerated in the
* {@link KeyCodes} class.
*
* @return the key code
*/
public int getNativeKeyCode() {
return getNativeEvent().getKeyCode();
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_isOption | /**
* Check whether a schema is optional. Returns true if it matches all of the following: 1. Its type is Type.UNION 2. It has two types 3. One of them is a NULL type.
*/
protected boolean isOption(Schema schema) {
return schema.getType().equals(Schema.Type.UNION)
&& schema.getTypes().size() == 2
&& (schema.getTypes().get(0).getType().equals(Schema.Type.NULL)
|| schema.getTypes().get(1).getType().equals(Schema.Type.NULL));
} | 3.68 |
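
For reference, a two-branch union containing "null" is exactly the shape the check above accepts; a small sketch with the Avro Schema API:

```java
// Sketch with the Avro API: a nullable (optional) string vs. a plain string.
Schema nullableString = new Schema.Parser().parse("[\"null\", \"string\"]");
Schema plainString = Schema.create(Schema.Type.STRING);
// isOption(nullableString) -> true; isOption(plainString) -> false
```
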
morf_XmlDataSetProducer_viewNames | /**
* @see org.alfasoftware.morf.metadata.Schema#viewNames()
*/
@Override
public Collection<String> viewNames() {
return Collections.emptySet();
} | 3.68 |
hadoop_HSAuditLogger_createFailureLog | /**
* A helper api for creating an audit log for a failure event.
*/
static String createFailureLog(String user, String operation, String perm,
String target, String description) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target, b);
add(Keys.RESULT, AuditConstants.FAILURE, b);
add(Keys.DESCRIPTION, description, b);
add(Keys.PERMISSIONS, perm, b);
return b.toString();
} | 3.68 |
pulsar_SecurityUtil_loginSimple | /**
* Initializes UserGroupInformation with the given Configuration and
* returns UserGroupInformation.getLoginUser(). All logins should happen
* through this class to ensure other threads are not concurrently
* modifying UserGroupInformation.
*
* @param config the configuration instance
*
* @return the UGI for the given principal
*
* @throws IOException if login failed
*/
public static synchronized UserGroupInformation loginSimple(final Configuration config) throws IOException {
Validate.notNull(config);
UserGroupInformation.setConfiguration(config);
return UserGroupInformation.getLoginUser();
} | 3.68 |
flink_DefaultJobLeaderService_containsJob | /**
* Check whether the service monitors the given job.
*
* @param jobId identifying the job
* @return True if the given job is monitored; otherwise false
*/
@Override
@VisibleForTesting
public boolean containsJob(JobID jobId) {
Preconditions.checkState(
DefaultJobLeaderService.State.STARTED == state,
"The service is currently not running.");
return jobLeaderServices.containsKey(jobId);
} | 3.68 |
flink_Pattern_notFollowedBy | /**
* Appends a new pattern to the existing one. The new pattern enforces that there is no event
* matching this pattern between the preceding pattern and succeeding this one.
*
* <p><b>NOTE:</b> There has to be other pattern after this one.
*
* @param name Name of the new pattern
* @return A new pattern which is appended to this one
*/
public Pattern<T, T> notFollowedBy(final String name) {
if (quantifier.hasProperty(Quantifier.QuantifierProperty.OPTIONAL)) {
throw new UnsupportedOperationException(
"Specifying a pattern with an optional path to NOT condition is not supported yet. "
+ "You can simulate such pattern with two independent patterns, one with and the other without "
+ "the optional part.");
}
return new Pattern<>(name, this, ConsumingStrategy.NOT_FOLLOW, afterMatchSkipStrategy);
} | 3.68 |
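
A hedged usage sketch of the negative pattern (the Event type and its getName() accessor are assumptions); note the mandatory pattern appended after the NOT part, as required by the javadoc above.

```java
// Assumed event type: match a "start" event that is NOT followed by a
// "middle" event before an "end" event arrives.
Pattern<Event, ?> pattern =
    Pattern.<Event>begin("start")
        .where(SimpleCondition.of(e -> "start".equals(e.getName())))
        .notFollowedBy("middle")
        .where(SimpleCondition.of(e -> "middle".equals(e.getName())))
        .followedBy("end")
        .where(SimpleCondition.of(e -> "end".equals(e.getName())));
```
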
framework_VUI_performSizeCheck | /**
* Called when the window or parent div might have been resized.
*
* This immediately checks the sizes of the window and the parent div (if
* monitoring it) and triggers layout recalculation if they have changed.
*/
protected void performSizeCheck() {
windowSizeMaybeChanged(Window.getClientWidth(),
Window.getClientHeight());
} | 3.68 |
flink_BooleanConditions_falseFunction | /** @return An {@link IterativeCondition} that always returns {@code false}. */
public static <T> IterativeCondition<T> falseFunction() {
return SimpleCondition.of(value -> false);
} | 3.68 |
hbase_ByteBuff_checkRefCount | /**
* Checks that there are still references to the buffer. This protects against the case where a
* ByteBuff method (i.e. slice, get, etc) could be called against a buffer whose backing data may
* have been released. We only need to do this check if the refCnt has a recycler. If there's no
* recycler, the backing data will be handled by normal java GC and won't get incorrectly
* released. So we can avoid the overhead of checking the refCnt on every call. See HBASE-27710.
*/
protected void checkRefCount() {
if (refCnt.hasRecycler()) {
ObjectUtil.checkPositive(refCnt(), REFERENCE_COUNT_NAME);
}
} | 3.68 |
hadoop_RegistryDNSServer_manageRegistryDNS | /**
* Performs operations required to setup the DNS registry instance (e.g. sets
* up a path listener to react to service record creation/deletion and invoke
* the appropriate registry method).
*/
private void manageRegistryDNS() {
try {
registryOperations.instantiateCacheForRegistry();
registryOperations.registerPathListener(new PathListener() {
private String registryRoot = getConfig().
get(RegistryConstants.KEY_REGISTRY_ZK_ROOT,
RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
@Override
public void nodeAdded(String path) throws IOException {
// get a listing of service records
String relativePath = getPathRelativeToRegistryRoot(path);
String child = RegistryPathUtils.lastPathEntry(path);
Map<String, RegistryPathStatus> map = new HashMap<>();
map.put(child, registryOperations.stat(relativePath));
Map<String, ServiceRecord> records =
RegistryUtils.extractServiceRecords(registryOperations,
getAdjustedParentPath(path),
map);
processServiceRecords(records, register);
pathToRecordMap.putAll(records);
}
private String getAdjustedParentPath(String path) {
Preconditions.checkNotNull(path);
String adjustedPath = null;
adjustedPath = getPathRelativeToRegistryRoot(path);
try {
return RegistryPathUtils.parentOf(adjustedPath);
} catch (PathNotFoundException e) {
// attempt to use passed in path
return path;
}
}
private String getPathRelativeToRegistryRoot(String path) {
String adjustedPath;
if (path.equals(registryRoot)) {
adjustedPath = "/";
} else {
adjustedPath = path.substring(registryRoot.length());
}
return adjustedPath;
}
@Override
public void nodeRemoved(String path) throws IOException {
ServiceRecord record = pathToRecordMap.remove(path.substring(
registryRoot.length()));
processServiceRecord(path, record, delete);
}
});
registryOperations.startCache();
// create listener for record deletions
} catch (Exception e) {
LOG.warn("Unable to monitor the registry. DNS support disabled.", e);
}
} | 3.68 |
hbase_CellUtil_copyRowTo | /**
* Copies the row to the given bytebuffer
* @param cell the cell whose row has to be copied
* @param destination the destination bytebuffer to which the row has to be copied
* @param destinationOffset the offset in the destination buffer
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyRowTo(Cell cell, ByteBuffer destination, int destinationOffset) {
short rowLen = cell.getRowLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getRowByteBuffer(),
destination, ((ByteBufferExtendedCell) cell).getRowPosition(), destinationOffset, rowLen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getRowArray(),
cell.getRowOffset(), rowLen);
}
return destinationOffset + rowLen;
} | 3.68 |
hadoop_IOStatisticsLogging_logIOStatisticsAtLevel | /**
* A method to log IOStatistics from a source at different levels.
*
* @param log Logger for logging.
* @param level LOG level.
* @param source Source to LOG.
*/
public static void logIOStatisticsAtLevel(Logger log, String level,
Object source) {
IOStatistics stats = retrieveIOStatistics(source);
if (stats != null) {
switch (level.toLowerCase(Locale.US)) {
case IOSTATISTICS_LOGGING_LEVEL_INFO:
LOG.info("IOStatistics: {}", ioStatisticsToPrettyString(stats));
break;
case IOSTATISTICS_LOGGING_LEVEL_ERROR:
LOG.error("IOStatistics: {}", ioStatisticsToPrettyString(stats));
break;
case IOSTATISTICS_LOGGING_LEVEL_WARN:
LOG.warn("IOStatistics: {}", ioStatisticsToPrettyString(stats));
break;
default:
logIOStatisticsAtDebug(log, "IOStatistics: {}", source);
}
}
} | 3.68 |
hmily_ConfigLoader_againLoad | /**
* Again load.
*
* @param context the context
* @param handler the handler
* @param tClass the t class
*/
default void againLoad(final Supplier<Context> context, final LoaderHandler<T> handler, final Class<T> tClass) {
T config = ConfigEnv.getInstance().getConfig(tClass);
for (PropertyKeySource<?> propertyKeySource : context.get().getSource()) {
ConfigPropertySource configPropertySource = new DefaultConfigPropertySource<>(propertyKeySource, PropertyKeyParse.INSTANCE);
Binder binder = Binder.of(configPropertySource);
T newConfig = binder.bind(config.prefix(), BindData.of(DataType.of(tClass), () -> config));
handler.finish(context, newConfig);
}
} | 3.68 |
hbase_BloomFilterChunk_set | /**
* Set the bit at the specified index to 1.
* @param pos index of bit
*/
void set(long pos) {
int bytePos = (int) (pos / 8);
int bitPos = (int) (pos % 8);
byte curByte = bloom.get(bytePos);
curByte |= BloomFilterUtil.bitvals[bitPos];
bloom.put(bytePos, curByte);
} | 3.68 |
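
The byte/bit arithmetic above is the standard trick for addressing a single bit inside a byte array; a standalone sketch on a plain byte[] (the real method writes into a ByteBuffer-backed field):

```java
// Standalone sketch: pos / 8 selects the byte, pos % 8 selects the bit inside
// that byte, and (1 << bitPos) is a single-bit mask, like the precomputed
// BloomFilterUtil.bitvals lookup used above.
static void setBit(byte[] bits, long pos) {
    int bytePos = (int) (pos / 8);
    int bitPos = (int) (pos % 8);
    bits[bytePos] |= (byte) (1 << bitPos);
}
```
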
hudi_SparkHoodieHBaseIndex_doMutations | /**
* Helper method to facilitate performing mutations (including puts and deletes) in Hbase.
*/
private void doMutations(BufferedMutator mutator, List<Mutation> mutations, RateLimiter limiter) throws IOException {
if (mutations.isEmpty()) {
return;
}
// report number of operations to account per second with rate limiter.
// If #limiter.getRate() operations are acquired within 1 second, ratelimiter will limit the rest of calls
// for within that second
limiter.tryAcquire(mutations.size());
mutator.mutate(mutations);
mutator.flush();
mutations.clear();
} | 3.68 |
framework_NestedMethodProperty_getValue | /**
* Gets the value stored in the Property. The value is resolved by calling
* the specified getter methods on the current instance:
*
* @return the value of the Property
* @see #getInstance()
*/
@Override
public T getValue() {
try {
Object object = instance;
for (Method m : getMethods) {
object = m.invoke(object);
if (object == null) {
return null;
}
}
return (T) object;
} catch (final Throwable e) {
throw new MethodException(this, e);
}
} | 3.68 |
rocketmq-connect_DorisSinkConnector_validate | /**
* Should be invoked before starting the connector.
*
* @param config
* @return error message
*/
@Override
public void validate(KeyValue config) {
// do validate config
} | 3.68 |
framework_MultiSelectionModelImpl_onSelectAll | /**
* Triggered when the user checks the select all checkbox.
*
* @param userOriginated
* {@code true} if originated from client side by user
*/
protected void onSelectAll(boolean userOriginated) {
if (userOriginated) {
verifyUserCanSelectAll();
// all selected state has been updated in client side already
getState(false).allSelected = true;
getUI().getConnectorTracker().getDiffState(this).put("allSelected",
true);
} else {
getState().allSelected = true;
}
Stream<T> allItemsStream;
DataProvider<T, ?> dataProvider = getGrid().getDataProvider();
// this will fetch everything from backend
if (dataProvider instanceof HierarchicalDataProvider) {
allItemsStream = fetchAllHierarchical(
(HierarchicalDataProvider<T, ?>) dataProvider);
} else {
allItemsStream = fetchAll(dataProvider);
}
LinkedHashSet<T> allItems = new LinkedHashSet<>();
allItemsStream.forEach(allItems::add);
updateSelection(allItems, Collections.emptySet(), userOriginated);
} | 3.68 |
framework_Table_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removeColumnReorderListener(ColumnReorderListener)}
*/
@Deprecated
public void removeListener(ColumnReorderListener listener) {
removeColumnReorderListener(listener);
} | 3.68 |
hbase_ReplicationSourceShipper_clearWALEntryBatch | /**
* Attempts to properly update <code>ReplicationSourceManager.totalBufferUser</code>, in case
* there were unprocessed entries batched by the reader to the shipper, but the shipper didn't
* manage to ship those because the replication source is being terminated. In that case, it
* iterates through the batched entries and decrease the pending entries size from
* <code>ReplicationSourceManager.totalBufferUser</code>
* <p/>
* <b>NOTES</b> 1) This method should only be called upon replication source termination. It
* blocks waiting for the termination of both shipper and reader threads, to avoid race conditions
* when updating <code>ReplicationSourceManager.totalBufferUser</code>. 2) It <b>does not</b>
* attempt to terminate reader and shipper threads. Interruption/termination of those <b>must</b>
* have been triggered prior to calling this method.
*/
void clearWALEntryBatch() {
long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout;
while (this.isAlive() || this.entryReader.isAlive()) {
try {
if (EnvironmentEdgeManager.currentTime() >= timeout) {
LOG.warn(
"Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper "
+ "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}",
this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive());
return;
} else {
// Wait both shipper and reader threads to stop
Thread.sleep(this.sleepForRetries);
}
} catch (InterruptedException e) {
LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. "
+ "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e);
return;
}
}
long totalReleasedBytes = 0;
while (true) {
WALEntryBatch batch = entryReader.entryBatchQueue.poll();
if (batch == null) {
break;
}
totalReleasedBytes += source.getSourceManager().releaseWALEntryBatchBufferQuota(batch);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.",
totalReleasedBytes);
}
} | 3.68 |
hbase_MasterObserver_preSetRegionServerQuota | /**
* Called before the quota for the region server is stored.
* @param ctx the environment to interact with the framework and master
* @param regionServer the name of the region server
* @param quotas the current quota for the region server
*/
default void preSetRegionServerQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String regionServer, final GlobalQuotaSettings quotas) throws IOException {
} | 3.68 |
hbase_HFileBlock_getUncompressedSizeWithHeader | /**
* The uncompressed size of the block data, including header size.
*/
public int getUncompressedSizeWithHeader() {
expectState(State.BLOCK_READY);
return baosInMemory.size();
} | 3.68 |
dubbo_MetadataReport_getServiceAppMapping | /**
* Service<-->Application Mapping -- START
**/
default Set<String> getServiceAppMapping(String serviceKey, MappingListener listener, URL url) {
return Collections.emptySet();
} | 3.68 |