name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_MasterObserver_preGrant | /**
* Called before granting user permissions.
* @param ctx the coprocessor instance's environment
* @param userPermission the user and permissions
* @param mergeExistingPermissions True if merge with previous granted permissions
*/
default void preGrant(ObserverContext<MasterCoprocessorEnvironment> ctx,
UserPermission userPermission, boolean mergeExistingPermissions) throws IOException {
} | 3.68 |
morf_ArchiveDataSetWriter_clearDestination | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider#clearDestination()
*/
@Override
public void clearDestination() {
// No-op. Done by the open method.
} | 3.68 |
hadoop_ResourceTypeInfo_newInstance | /**
* Create a new instance of ResourceTypeInfo from name.
*
* @param name name of resource type
* @return the new ResourceTypeInfo object
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static ResourceTypeInfo newInstance(String name) {
return ResourceTypeInfo.newInstance(name, "");
} | 3.68 |
hbase_LogLevel_main | /**
* A command line implementation
*/
public static void main(String[] args) throws Exception {
CLI cli = new CLI(new Configuration());
System.exit(cli.run(args));
} | 3.68 |
hudi_HoodieRepairTool_readConfigFromFileSystem | /**
* Reads config from the file system.
*
* @param jsc {@link JavaSparkContext} instance.
* @param cfg {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs)
.getProps(true);
} | 3.68 |
dubbo_ReflectUtils_name2class | /**
* name to class.
* "boolean" => boolean.class
* "java.util.Map[][]" => java.util.Map[][].class
*
* @param cl ClassLoader instance.
* @param name name.
* @return Class instance.
*/
private static Class<?> name2class(ClassLoader cl, String name) throws ClassNotFoundException {
int c = 0, index = name.indexOf('[');
if (index > 0) {
c = (name.length() - index) / 2;
name = name.substring(0, index);
}
if (c > 0) {
StringBuilder sb = new StringBuilder();
while (c-- > 0) {
sb.append('[');
}
if ("void".equals(name)) {
sb.append(JVM_VOID);
} else if ("boolean".equals(name)) {
sb.append(JVM_BOOLEAN);
} else if ("byte".equals(name)) {
sb.append(JVM_BYTE);
} else if ("char".equals(name)) {
sb.append(JVM_CHAR);
} else if ("double".equals(name)) {
sb.append(JVM_DOUBLE);
} else if ("float".equals(name)) {
sb.append(JVM_FLOAT);
} else if ("int".equals(name)) {
sb.append(JVM_INT);
} else if ("long".equals(name)) {
sb.append(JVM_LONG);
} else if ("short".equals(name)) {
sb.append(JVM_SHORT);
} else {
// "java.lang.Object" ==> "Ljava.lang.Object;"
sb.append('L').append(name).append(';');
}
name = sb.toString();
} else {
if ("void".equals(name)) {
return void.class;
}
if ("boolean".equals(name)) {
return boolean.class;
}
if ("byte".equals(name)) {
return byte.class;
}
if ("char".equals(name)) {
return char.class;
}
if ("double".equals(name)) {
return double.class;
}
if ("float".equals(name)) {
return float.class;
}
if ("int".equals(name)) {
return int.class;
}
if ("long".equals(name)) {
return long.class;
}
if ("short".equals(name)) {
return short.class;
}
}
if (cl == null) {
cl = ClassUtils.getClassLoader();
}
return Class.forName(name, true, cl);
} | 3.68 |
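
To make the descriptor handling in name2class above concrete, here is a small standalone sketch (plain JDK only, not part of the Dubbo code) showing how the bracketed names it builds resolve through Class.forName:

```java
public class DescriptorDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        // Primitive array components use single-letter JVM descriptors, e.g. "[I" for int[].
        Class<?> intArray = Class.forName("[I");
        // Reference components are wrapped as "L<name>;", e.g. "[[Ljava.util.Map;" for java.util.Map[][].
        Class<?> mapMatrix = Class.forName("[[Ljava.util.Map;");
        System.out.println(intArray);   // prints: class [I
        System.out.println(mapMatrix);  // prints: class [[Ljava.util.Map;
    }
}
```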
flink_HiveParserQB_containsQueryWithoutSourceTable | /**
* Returns true if the query block contains any query or subquery without a source table,
* e.g. select current_user(), select current_database().
*
* @return true, if the query block contains any query without a source table
*/
public boolean containsQueryWithoutSourceTable() {
for (HiveParserQBExpr qbexpr : aliasToSubq.values()) {
if (qbexpr.containsQueryWithoutSourceTable()) {
return true;
}
}
return aliasToTabs.size() == 0 && aliasToSubq.size() == 0;
} | 3.68 |
pulsar_LinuxInfoUtils_getTotalNicLimit | /**
* Get all physical nic limit.
* @param nics All nic path
* @param bitRateUnit Bit rate unit
* @return Total nic limit
*/
public static double getTotalNicLimit(List<String> nics, BitRateUnit bitRateUnit) {
return bitRateUnit.convert(nics.stream().mapToDouble(nicPath -> {
try {
return readDoubleFromFile(getReplacedNICPath(NIC_SPEED_TEMPLATE, nicPath));
} catch (IOException e) {
log.error("[LinuxInfo] Failed to get total nic limit.", e);
return 0d;
}
}).sum(), BitRateUnit.Megabit);
} | 3.68 |
flink_TypeInformation_of | /**
* Creates a TypeInformation for a generic type via a utility "type hint". This method can be
* used as follows:
*
* <pre>{@code
* TypeInformation<Tuple2<String, Long>> info = TypeInformation.of(new TypeHint<Tuple2<String, Long>>(){});
* }</pre>
*
* @param typeHint The hint for the generic type.
* @param <T> The generic type.
* @return The TypeInformation object for the type described by the hint.
*/
public static <T> TypeInformation<T> of(TypeHint<T> typeHint) {
return typeHint.getTypeInfo();
} | 3.68 |
framework_DefaultEditorEventHandler_isOpenEvent | /**
* Returns whether the given event should open the editor. The default
* implementation returns true if and only if the event is a doubleclick or
* if it is a keydown event and the keycode is {@link #KEYCODE_OPEN}.
*
* @param event
* the received event
* @return true if the event is an open event, false otherwise
*/
protected boolean isOpenEvent(EditorDomEvent<T> event) {
final Event e = event.getDomEvent();
return e.getTypeInt() == Event.ONDBLCLICK
|| (e.getTypeInt() == Event.ONKEYDOWN
&& e.getKeyCode() == KEYCODE_OPEN)
|| isTouchOpenEvent(event);
} | 3.68 |
rocketmq-connect_LocalStateManagementServiceImpl_prePersist | /**
* pre persist
*/
private void prePersist() {
Map<String, ConnAndTaskStatus.CacheEntry<ConnectorStatus>> connectors = connAndTaskStatus.getConnectors();
if (connectors.isEmpty()) {
return;
}
connectors.forEach((connectName, connectorStatus) -> {
connectorStatusStore.put(connectName, connectorStatus.get());
Map<Integer, ConnAndTaskStatus.CacheEntry<TaskStatus>> cacheTaskStatus = connAndTaskStatus.getTasks().row(connectName);
if (cacheTaskStatus == null) {
return;
}
taskStatusStore.put(connectName, new ArrayList<>());
cacheTaskStatus.forEach((taskId, taskStatus) -> {
if (taskStatus != null) {
taskStatusStore.get(connectName).add(taskStatus.get());
}
});
});
} | 3.68 |
hadoop_Cluster_getSystemDir | /**
* Grab the jobtracker system directory path where
* job-specific files will be placed.
*
* @return the system directory where job-specific files are to be placed.
*/
public Path getSystemDir() throws IOException, InterruptedException {
if (sysDir == null) {
sysDir = new Path(client.getSystemDir());
}
return sysDir;
} | 3.68 |
pulsar_NamespacesBase_internalRemoveBacklogQuota | /**
* Base method for removeBacklogQuota v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalRemoveBacklogQuota(AsyncResponse asyncResponse, BacklogQuotaType backlogQuotaType) {
validateNamespacePolicyOperationAsync(namespaceName, PolicyName.BACKLOG, PolicyOperation.WRITE)
.thenCompose(__ -> validatePoliciesReadOnlyAccessAsync())
.thenCompose(__ -> namespaceResources().setPoliciesAsync(namespaceName, policies -> {
final BacklogQuotaType quotaType = backlogQuotaType != null ? backlogQuotaType
: BacklogQuotaType.destination_storage;
policies.backlog_quota_map.remove(quotaType);
return policies;
})).thenAccept(__ -> {
asyncResponse.resume(Response.noContent().build());
log.info("[{}] Successfully removed backlog namespace={}, quota={}", clientAppId(), namespaceName,
backlogQuotaType);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to update backlog quota map for namespace {}",
clientAppId(), namespaceName, ex);
return null;
});
} | 3.68 |
framework_VAbstractCalendarPanel_onCancel | /**
* Notifies submit-listeners of a cancel event
*/
private void onCancel() {
if (getSubmitListener() != null) {
getSubmitListener().onCancel();
}
} | 3.68 |
rocketmq-connect_ConnectMetrics_registry | /**
* get metric registry
*
* @return
*/
public MetricRegistry registry() {
return metricRegistry;
} | 3.68 |
flink_ExpandColumnFunctionsRule_indexOfName | /** Find the index of targetName in the list. Return -1 if not found. */
private static int indexOfName(
List<UnresolvedReferenceExpression> inputFieldReferences, String targetName) {
int i;
for (i = 0; i < inputFieldReferences.size(); ++i) {
if (inputFieldReferences.get(i).getName().equals(targetName)) {
break;
}
}
return i == inputFieldReferences.size() ? -1 : i;
} | 3.68 |
hbase_ReplicationPeer_isPeerEnabled | /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/
default boolean isPeerEnabled() {
return getPeerState() == PeerState.ENABLED;
} | 3.68 |
hbase_HFileArchiveUtil_getTableArchivePath | /**
* Get the path to the table archive directory based on the configured archive directory.
* <p>
* Assumes that the table is already archived.
* @param conf {@link Configuration} to read the archive directory property. Can be null
* @param tableName Name of the table to be archived. Cannot be null.
* @return {@link Path} to the archive directory for the table
*/
public static Path getTableArchivePath(final Configuration conf, final TableName tableName)
throws IOException {
return CommonFSUtils.getTableDir(getArchivePath(conf), tableName);
} | 3.68 |
hadoop_AuxServiceFile_srcFile | /**
* This provides the source location of the configuration file, the content
* of which is dumped to dest_file post property substitutions, in the format
* as specified in type. Typically the src_file would point to a source
* controlled network accessible file maintained by tools like puppet, chef,
* or hdfs etc. Currently, only hdfs is supported.
**/
public AuxServiceFile srcFile(String file) {
this.srcFile = file;
return this;
} | 3.68 |
framework_FieldGroup_addCommitHandler | /**
* Adds a commit handler.
* <p>
* The commit handler is called before the field values are committed to the
* item ( {@link CommitHandler#preCommit(CommitEvent)}) and after the item
* has been updated ({@link CommitHandler#postCommit(CommitEvent)}). If a
* {@link CommitHandler} throws a CommitException the whole commit is
* aborted and the fields retain their old values.
*
* @param commitHandler
* The commit handler to add
*/
public void addCommitHandler(CommitHandler commitHandler) {
commitHandlers.add(commitHandler);
} | 3.68 |
graphhopper_PathMerger_updateInstructionsWithContext | /**
* This method iterates over all instructions and uses the available context to improve the instructions.
* If the request contains a heading, this method can transform the first continue to a u-turn if the heading
* points into the opposite direction of the route.
* At a waypoint it can transform the continue to a u-turn if the route involves turning.
*/
private InstructionList updateInstructionsWithContext(InstructionList instructions) {
Instruction instruction;
Instruction nextInstruction;
for (int i = 0; i < instructions.size() - 1; i++) {
instruction = instructions.get(i);
if (i == 0 && !Double.isNaN(favoredHeading) && instruction.extraInfo.containsKey("heading")) {
double heading = (double) instruction.extraInfo.get("heading");
double diff = Math.abs(heading - favoredHeading) % 360;
if (diff > 170 && diff < 190) {
// The requested heading points into the opposite direction of the calculated heading
// therefore we change the continue instruction to a u-turn
instruction.setSign(Instruction.U_TURN_UNKNOWN);
}
}
if (instruction.getSign() == Instruction.REACHED_VIA) {
nextInstruction = instructions.get(i + 1);
if (nextInstruction.getSign() != Instruction.CONTINUE_ON_STREET
|| !instruction.extraInfo.containsKey("last_heading")
|| !nextInstruction.extraInfo.containsKey("heading")) {
// TODO throw exception?
continue;
}
double lastHeading = (double) instruction.extraInfo.get("last_heading");
double heading = (double) nextInstruction.extraInfo.get("heading");
// Since it's supposed to go back the same edge, we can be very strict with the diff
double diff = Math.abs(lastHeading - heading) % 360;
if (diff > 179 && diff < 181) {
nextInstruction.setSign(Instruction.U_TURN_UNKNOWN);
}
}
}
return instructions;
} | 3.68 |
framework_GeneratedPropertyContainer_getContainerPropertyIds | /**
* Returns a list of property ids available in this container. This
* collection will contain properties for generated properties. Removed
* properties will not show unless there is a generated property overriding
* those.
*/
@Override
public Collection<?> getContainerPropertyIds() {
Set<Object> wrappedProperties = new LinkedHashSet<Object>(
wrappedContainer.getContainerPropertyIds());
wrappedProperties.removeAll(removedProperties);
wrappedProperties.addAll(propertyGenerators.keySet());
return wrappedProperties;
} | 3.68 |
hbase_ReplicationPeers_getPeer | /**
* Returns the ReplicationPeerImpl for the specified cached peer. This ReplicationPeer will
* continue to track changes to the Peer's state and config. This method returns null if no peer
* has been cached with the given peerId.
* @param peerId id for the peer
* @return ReplicationPeer object
*/
public ReplicationPeerImpl getPeer(String peerId) {
return peerCache.get(peerId);
} | 3.68 |
morf_SqlDialect_getSqlForInsertInto | /**
* Returns the INSERT INTO statement.
*
* @param insertStatement the {@linkplain InsertStatement} object which can be used by the overriding methods to customize the INSERT statement.
* @return the INSERT INTO statement.
*/
protected String getSqlForInsertInto(@SuppressWarnings("unused") InsertStatement insertStatement) {
return "INSERT INTO ";
} | 3.68 |
hbase_HeterogeneousRegionCountCostFunction_readFile | /**
* used to read the rule files from either HDFS or local FS
*/
private List<String> readFile(final String filename) {
if (null == filename) {
return null;
}
try {
if (filename.startsWith("file:")) {
return readFileFromLocalFS(filename);
}
return readFileFromHDFS(filename);
} catch (IOException e) {
LOG.error("cannot read rules file located at ' " + filename + " ':" + e.getMessage());
return null;
}
} | 3.68 |
AreaShop_BuyRegion_setPrice | /**
* Change the price of the region.
* @param price The price to set this region to
*/
public void setPrice(Double price) {
setSetting("buy.price", price);
} | 3.68 |
hadoop_S3APrefetchingInputStream_available | /**
* Returns the number of bytes available for reading without blocking.
*
* @return the number of bytes available for reading without blocking.
* @throws IOException if there is an IO error during this operation.
*/
@Override
public synchronized int available() throws IOException {
throwIfClosed();
return inputStream.available();
} | 3.68 |
hmily_HmilySQLUtil_getExactlyNumber | /**
* Get exactly number value and type.
*
* @param value string to be converted
* @param radix radix
* @return exactly number value and type
*/
public static Number getExactlyNumber(final String value, final int radix) {
try {
return getBigInteger(value, radix);
} catch (final NumberFormatException ex) {
return new BigDecimal(value);
}
} | 3.68 |
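
A rough standalone approximation of the fallback behaviour shown above, assuming the internal getBigInteger helper parses with java.math.BigInteger (the real helper may narrow the result type):

```java
import java.math.BigDecimal;
import java.math.BigInteger;

public class ExactNumberDemo {

    // Integral strings parse in the given radix; anything else falls back to BigDecimal.
    static Number parse(String value, int radix) {
        try {
            return new BigInteger(value, radix);
        } catch (NumberFormatException ex) {
            return new BigDecimal(value);
        }
    }

    public static void main(String[] args) {
        System.out.println(parse("ff", 16));  // 255
        System.out.println(parse("1.5", 10)); // 1.5
    }
}
```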
morf_Version2to4TransformingReader_readVersion | /**
* Tests whether a given input stream contains XML format 2, and therefore
* should have the transform applied.
* <p>
* This is designed to match the known output format of
* {@link XmlDataSetConsumer} which previously produced invalid XML. It is
* deliberately brittle. There is no need for a more intelligent XML parser
* here.
* </p>
*
* @param bufferedReader The input stream in a buffered reader
* @return the version number read from the stream, or -1 if it could not be determined.
*/
static int readVersion(BufferedReader bufferedReader) {
try {
bufferedReader.mark(1024); // arbitrary read-ahead limit - that's enough to get the info we want
try {
char[] buffer = new char[1024];
int read = bufferedReader.read(buffer);
if (read == -1) {
return -1;
}
String content = new String(buffer, 0, read);
// Apply the transform if the version number is 2 or 1
Pattern pattern = Pattern.compile("table\\sversion=\"(\\d+)\""); //
Matcher matcher = pattern.matcher(content);
if (!matcher.find()) {
return -1;
} else {
return Integer.parseInt(matcher.group(1));
}
} finally {
bufferedReader.reset();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.68 |
querydsl_QueryBase_having | /**
* Add filters for aggregation
*
* @param o having conditions
* @return the current object
*/
public Q having(Predicate... o) {
return queryMixin.having(o);
} | 3.68 |
flink_HsMemoryDataManager_spillBuffers | /**
* Spill buffers for each subpartition in a decision.
*
* <p>Note that: The method should not be locked, it is the responsibility of each subpartition
* to maintain thread safety itself.
*
* @param toSpill All buffers that need to be spilled in a decision.
*/
private void spillBuffers(Map<Integer, List<BufferIndexAndChannel>> toSpill) {
CompletableFuture<Void> spillingCompleteFuture = new CompletableFuture<>();
List<BufferWithIdentity> bufferWithIdentities = new ArrayList<>();
toSpill.forEach(
(subpartitionId, bufferIndexAndChannels) -> {
HsSubpartitionMemoryDataManager subpartitionDataManager =
getSubpartitionMemoryDataManager(subpartitionId);
bufferWithIdentities.addAll(
subpartitionDataManager.spillSubpartitionBuffers(
bufferIndexAndChannels, spillingCompleteFuture));
// decrease numUnSpillBuffers as this subpartition's buffer is spill.
numUnSpillBuffers.getAndAdd(-bufferIndexAndChannels.size());
});
FutureUtils.assertNoException(
spiller.spillAsync(bufferWithIdentities)
.thenAccept(
spilledBuffers -> {
fileDataIndex.addBuffers(spilledBuffers);
spillingCompleteFuture.complete(null);
}));
} | 3.68 |
hbase_SegmentFactory_createImmutableSegmentByMerge | // create new flat immutable segment from merging old immutable segments
// for merge
public ImmutableSegment createImmutableSegmentByMerge(final Configuration conf,
final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells,
List<ImmutableSegment> segments, CompactingMemStore.IndexType idxType,
MemStoreCompactionStrategy.Action action) throws IOException {
MemStoreLAB memStoreLAB = getMergedMemStoreLAB(conf, segments);
return createImmutableSegment(conf, comparator, iterator, memStoreLAB, numOfCells, action,
idxType);
} | 3.68 |
hbase_IndividualBytesFieldCell_getRowArray | /**
* Implement Cell interface
*/
// 1) Row
@Override
public byte[] getRowArray() {
// If row is null, the constructor will reject it, by {@link KeyValue#checkParameters()},
// so it is safe to return row without checking.
return row;
} | 3.68 |
framework_VaadinFinderLocatorStrategy_findConnectorsByPath | /**
* Recursively finds connectors for the elements identified by the provided
* path by traversing the connector hierarchy starting from {@code parents}
* connectors.
*
* @param path
* The path identifying elements.
* @param parents
* The list of connectors to start traversing from.
* @return The list of connectors identified by {@code path} or empty list
* if no such connectors could be found.
*/
private List<ComponentConnector> findConnectorsByPath(String path,
List<ComponentConnector> parents) {
boolean findRecursively = path.startsWith("//");
// Strip away the one or two slashes from the beginning of the path
path = path.substring(findRecursively ? 2 : 1);
String[] fragments = splitFirstFragmentFromTheRest(path);
List<ComponentConnector> connectors = new ArrayList<>();
for (ComponentConnector parent : parents) {
connectors.addAll(filterMatches(
collectPotentialMatches(parent, fragments[0],
findRecursively),
SelectorPredicate.extractPredicates(fragments[0])));
}
if (!connectors.isEmpty() && fragments.length > 1) {
return (findConnectorsByPath(fragments[1], connectors));
}
return eliminateDuplicates(connectors);
} | 3.68 |
hbase_MetricsSink_getAppliedOps | /**
* Gets the total number of OPs delivered to this sink.
*/
public long getAppliedOps() {
return this.mss.getSinkAppliedOps();
} | 3.68 |
pulsar_SubscribeRateLimiter_getAvailableSubscribeRateLimit | /**
* Returns the available subscribe permits if subscribe-throttling is enabled, otherwise returns -1.
*
* @return
*/
public long getAvailableSubscribeRateLimit(ConsumerIdentifier consumerIdentifier) {
return subscribeRateLimiter.get(consumerIdentifier)
== null ? -1 : subscribeRateLimiter.get(consumerIdentifier).getAvailablePermits();
} | 3.68 |
dubbo_NetUtils_getIpByHost | /**
* @param hostName
* @return ip address or hostName if UnknownHostException
*/
public static String getIpByHost(String hostName) {
try {
return InetAddress.getByName(hostName).getHostAddress();
} catch (UnknownHostException e) {
return hostName;
}
} | 3.68 |
flink_MetricListener_getMeter | /**
* Get registered {@link Meter} with identifier relative to the root metric group.
*
* @param identifier identifier relative to the root metric group
* @return Optional registered meter
*/
public Optional<Meter> getMeter(String... identifier) {
return getMetric(Meter.class, identifier);
} | 3.68 |
flink_ProjectedRowData_from | /**
* Create an empty {@link ProjectedRowData} starting from a {@link Projection}.
*
* <p>Throws {@link IllegalStateException} if the provided {@code projection} array contains
* nested projections, which are not supported by {@link ProjectedRowData}.
*
* @see Projection
* @see ProjectedRowData
*/
public static ProjectedRowData from(Projection projection) {
return new ProjectedRowData(projection.toTopLevelIndexes());
} | 3.68 |
AreaShop_Utils_getDurationFromMinutesOrString | /**
* Get a setting from config that can be either a number indicating minutes
* or a duration string.
* @param path Path of the setting to read
* @return milliseconds that the setting indicates
*/
public static long getDurationFromMinutesOrString(String path) {
if(config.isLong(path) || config.isInt(path)) {
long setting = config.getLong(path);
if(setting != -1) {
setting *= 60 * 1000;
}
return setting;
} else {
return durationStringToLong(config.getString(path));
}
} | 3.68 |
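
As a quick illustration of the numeric branch above (a hypothetical standalone snippet, not AreaShop code): the value -1 is treated as "disabled" and passed through, while any other number of minutes is converted to milliseconds:

```java
public class DurationDemo {

    // Mirrors the numeric branch: -1 passes through, other values become milliseconds.
    static long minutesToMillis(long minutes) {
        return minutes == -1 ? -1 : minutes * 60 * 1000;
    }

    public static void main(String[] args) {
        System.out.println(minutesToMillis(5));  // 300000
        System.out.println(minutesToMillis(-1)); // -1
    }
}
```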
morf_MySqlDialect_connectionTestStatement | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#connectionTestStatement()
*/
@Override
public String connectionTestStatement() {
return "select 1";
} | 3.68 |
flink_QueryableStateConfiguration_numProxyQueryThreads | /** Returns the number of query threads for the queryable state client proxy. */
public int numProxyQueryThreads() {
return numPQueryThreads;
} | 3.68 |
hudi_SparkBasedReader_readAvro | // Spark anyways globs the path and gets all the paths in memory so take the List<filePaths> as an argument.
// https://github.com/apache/spark/.../org/apache/spark/sql/execution/datasources/DataSource.scala#L251
public static JavaRDD<GenericRecord> readAvro(SparkSession sparkSession, String schemaStr, List<String> listOfPaths,
Option<String> structName, Option<String> nameSpace) {
Dataset<Row> dataSet = sparkSession.read()
.format(SPARK_AVRO_FORMAT)
.option(AVRO_SCHEMA_OPTION_KEY, schemaStr)
.load(JavaConverters.asScalaIteratorConverter(listOfPaths.iterator()).asScala().toSeq());
return HoodieSparkUtils
.createRdd(dataSet.toDF(), structName.orElse(RowBasedSchemaProvider.HOODIE_RECORD_STRUCT_NAME),
nameSpace.orElse(RowBasedSchemaProvider.HOODIE_RECORD_NAMESPACE), false, Option.empty())
.toJavaRDD();
} | 3.68 |
querydsl_FunctionalHelpers_wrap | /**
* Wrap a Querydsl expression into a Guava function
*
* @param projection projection to wrap
* @return Guava function
*/
public static <F,T> Function<F,T> wrap(Expression<T> projection) {
Path<?> path = projection.accept(PathExtractor.DEFAULT, null);
if (path != null) {
final Evaluator<T> ev = createEvaluator(path.getRoot(), projection);
return ev::evaluate;
} else {
throw new IllegalArgumentException("No path in " + projection);
}
} | 3.68 |
flink_CastRule_create | /** Create a casting context. */
static Context create(
boolean isPrinting,
boolean legacyBehaviour,
ZoneId zoneId,
ClassLoader classLoader) {
return new Context() {
@Override
public boolean isPrinting() {
return isPrinting;
}
@Override
public boolean legacyBehaviour() {
return legacyBehaviour;
}
@Override
public ZoneId getSessionZoneId() {
return zoneId;
}
@Override
public ClassLoader getClassLoader() {
return classLoader;
}
};
} | 3.68 |
querydsl_GroupBy_map | /**
* Create a new aggregating map expression using a backing LinkedHashMap
*
* @param key key for the map entries
* @param value value for the map entries
* @return wrapper expression
*/
public static <K, V, T, U> AbstractGroupExpression<Pair<K, V>, Map<T, U>> map(GroupExpression<K, T> key,
GroupExpression<V, U> value) {
return new GMap.Mixin<K, V, T, U, Map<T, U>>(key, value, GMap.createLinked(QPair.create(key, value)));
} | 3.68 |
flink_OSSTestCredentials_getOSSAccessKey | /**
* Get OSS access key.
*
* @return OSS access key
*/
public static String getOSSAccessKey() {
if (ACCESS_KEY != null) {
return ACCESS_KEY;
} else {
throw new IllegalStateException("OSS access key is not available");
}
} | 3.68 |
hudi_AvroInternalSchemaConverter_nullableSchema | /** Returns schema with nullable true. */
public static Schema nullableSchema(Schema schema) {
if (schema.getType() == UNION) {
if (!isOptional(schema)) {
throw new HoodieSchemaException(String.format("Union schemas are not supported: %s", schema));
}
return schema;
} else {
return Schema.createUnion(Schema.create(Schema.Type.NULL), schema);
}
} | 3.68 |
hudi_ParquetSchemaConverter_toParquetType | /**
* Converts Flink Internal Type to Parquet schema.
*
* @param typeInformation Flink type information
* @param legacyMode whether to use the back-compatible schema instead of the standard LIST and MAP schema
* @return Parquet schema
*/
public static MessageType toParquetType(
TypeInformation<?> typeInformation, boolean legacyMode) {
return (MessageType)
convertField(null, typeInformation, Type.Repetition.OPTIONAL, legacyMode);
} | 3.68 |
hadoop_FederationUtil_newSecretManager | /**
* Creates an instance of DelegationTokenSecretManager from the
* configuration.
*
* @param conf Configuration that defines the token manager class.
* @return New delegation token secret manager.
*/
public static AbstractDelegationTokenSecretManager<DelegationTokenIdentifier>
newSecretManager(Configuration conf) {
Class<? extends AbstractDelegationTokenSecretManager> clazz =
conf.getClass(
RBFConfigKeys.DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS,
RBFConfigKeys.DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS_DEFAULT,
AbstractDelegationTokenSecretManager.class);
return newInstance(conf, null, null, clazz);
} | 3.68 |
hadoop_ServiceShutdownHook_shutdown | /**
* Shutdown operation.
* <p>
* Subclasses may extend it, but it is primarily
* made available for testing.
* @return true if the service was stopped and no exception was raised.
*/
protected boolean shutdown() {
Service service;
boolean result = false;
synchronized (this) {
service = serviceRef.get();
serviceRef.clear();
}
if (service != null) {
try {
// Stop the Service
service.stop();
result = true;
} catch (Throwable t) {
LOG.info("Error stopping {}", service.getName(), t);
}
}
return result;
} | 3.68 |
flink_GlobalProperties_filterBySemanticProperties | /**
* Filters these GlobalProperties by the fields that are forwarded to the output as described by
* the SemanticProperties.
*
* @param props The semantic properties holding information about forwarded fields.
* @param input The index of the input.
* @return The filtered GlobalProperties
*/
public GlobalProperties filterBySemanticProperties(SemanticProperties props, int input) {
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
GlobalProperties gp = new GlobalProperties();
// filter partitioning
switch (this.partitioning) {
case RANGE_PARTITIONED:
// check if ordering is preserved
Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int sourceField = this.ordering.getInvolvedIndexes().get(i);
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
// partitioning is destroyed
newOrdering = null;
break;
} else {
// use any field of target fields for now. We should use something like
// field equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn(
"Found that a field is forwarded to more than one target field in "
+ "semantic forwarded field information. Will only use the field with the lowest index.");
}
newOrdering.appendOrdering(
targetField.toArray()[0],
this.ordering.getType(i),
this.ordering.getOrder(i));
}
}
if (newOrdering != null) {
gp.partitioning = PartitioningProperty.RANGE_PARTITIONED;
gp.ordering = newOrdering;
gp.partitioningFields = newOrdering.getInvolvedIndexes();
gp.distribution = this.distribution;
}
break;
case HASH_PARTITIONED:
case ANY_PARTITIONING:
case CUSTOM_PARTITIONING:
FieldList newPartitioningFields = new FieldList();
for (int sourceField : this.partitioningFields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
newPartitioningFields = null;
break;
} else {
// use any field of target fields for now. We should use something like
// field equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn(
"Found that a field is forwarded to more than one target field in "
+ "semantic forwarded field information. Will only use the field with the lowest index.");
}
newPartitioningFields =
newPartitioningFields.addField(targetField.toArray()[0]);
}
}
if (newPartitioningFields != null) {
gp.partitioning = this.partitioning;
gp.partitioningFields = newPartitioningFields;
gp.customPartitioner = this.customPartitioner;
}
break;
case FORCED_REBALANCED:
case FULL_REPLICATION:
case RANDOM_PARTITIONED:
gp.partitioning = this.partitioning;
break;
default:
throw new RuntimeException("Unknown partitioning type.");
}
// filter unique field combinations
if (this.uniqueFieldCombinations != null) {
Set<FieldSet> newUniqueFieldCombinations = new HashSet<FieldSet>();
for (FieldSet fieldCombo : this.uniqueFieldCombinations) {
FieldSet newFieldCombo = new FieldSet();
for (Integer sourceField : fieldCombo) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
newFieldCombo = null;
break;
} else {
// use any field of target fields for now. We should use something like
// field equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn(
"Found that a field is forwarded to more than one target field in "
+ "semantic forwarded field information. Will only use the field with the lowest index.");
}
newFieldCombo = newFieldCombo.addField(targetField.toArray()[0]);
}
}
if (newFieldCombo != null) {
newUniqueFieldCombinations.add(newFieldCombo);
}
}
if (!newUniqueFieldCombinations.isEmpty()) {
gp.uniqueFieldCombinations = newUniqueFieldCombinations;
}
}
return gp;
} | 3.68 |
framework_AbstractSingleComponentContainerConnector_getContent | /**
* Returns the content (only/first child) of the container.
*
* @return child connector or null if none (e.g. invisible or not set on
* server)
*/
protected ComponentConnector getContent() {
List<ComponentConnector> children = getChildComponents();
if (children.isEmpty()) {
return null;
} else {
return children.get(0);
}
} | 3.68 |
hbase_QuotaObserverChore_addTableQuotaTable | /**
* Adds a table with a table quota.
*/
public void addTableQuotaTable(TableName tn) {
tablesWithTableQuotas.add(tn);
} | 3.68 |
flink_FlinkRexBuilder_makeIn | /**
* Convert the conditions into the {@code IN} and fix [CALCITE-4888]: Unexpected {@link RexNode}
* when call {@link RelBuilder#in} to create an {@code IN} predicate with a list of varchar
* literals which have different length in {@link RexBuilder#makeIn}.
*
* <p>The bug is because the origin implementation doesn't take {@link
* FlinkTypeSystem#shouldConvertRaggedUnionTypesToVarying} into consideration. When this is
* true, the behaviour should not padding char. Please see
* https://issues.apache.org/jira/browse/CALCITE-4590 and
* https://issues.apache.org/jira/browse/CALCITE-2321. Please refer to {@code
* org.apache.calcite.rex.RexSimplify.RexSargBuilder#getType} for the correct behaviour.
*
* <p>Once CALCITE-4888 is fixed, this method (and related methods) should be removed.
*/
@Override
@SuppressWarnings("unchecked")
public RexNode makeIn(RexNode arg, List<? extends RexNode> ranges) {
if (areAssignable(arg, ranges)) {
// Fix calcite doesn't check literal whether is NULL here
List<RexNode> rangeWithoutNull = new ArrayList<>();
boolean containsNull = false;
for (RexNode node : ranges) {
if (isNull(node)) {
containsNull = true;
} else {
rangeWithoutNull.add(node);
}
}
final Sarg sarg = toSarg(Comparable.class, rangeWithoutNull, containsNull);
if (sarg != null) {
List<RelDataType> distinctTypes =
Util.distinctList(
ranges.stream().map(RexNode::getType).collect(Collectors.toList()));
RelDataType commonType = getTypeFactory().leastRestrictive(distinctTypes);
return makeCall(
SqlStdOperatorTable.SEARCH,
arg,
makeSearchArgumentLiteral(sarg, commonType));
}
}
return RexUtil.composeDisjunction(
this,
ranges.stream()
.map(r -> makeCall(SqlStdOperatorTable.EQUALS, arg, r))
.collect(Util.toImmutableList()));
} | 3.68 |
hadoop_LoadManifestsStage_toString | /**
* To String includes all summary info except statistics.
* @return string value
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"SummaryInfo{");
sb.append("manifestCount=").append(getManifestCount());
sb.append(", fileCount=").append(getFileCount());
sb.append(", directoryCount=").append(getDirectoryCount());
sb.append(", totalFileSize=").append(
byteCountToDisplaySize(getTotalFileSize()));
sb.append('}');
return sb.toString();
} | 3.68 |
hbase_IncrementalBackupManager_getIncrBackupLogFileMap | /**
* Obtain the list of logs that need to be copied out for this incremental backup. The list is set
* in BackupInfo.
* @return The new HashMap of RS log time stamps after the log roll for this incremental backup.
* @throws IOException exception
*/
public Map<String, Long> getIncrBackupLogFileMap() throws IOException {
List<String> logList;
Map<String, Long> newTimestamps;
Map<String, Long> previousTimestampMins;
String savedStartCode = readBackupStartCode();
// key: tableName
// value: <RegionServer,PreviousTimeStamp>
Map<TableName, Map<String, Long>> previousTimestampMap = readLogTimestampMap();
previousTimestampMins = BackupUtils.getRSLogTimestampMins(previousTimestampMap);
if (LOG.isDebugEnabled()) {
LOG.debug("StartCode " + savedStartCode + "for backupID " + backupInfo.getBackupId());
}
// get all new log files from .logs and .oldlogs after last TS and before new timestamp
if (
savedStartCode == null || previousTimestampMins == null || previousTimestampMins.isEmpty()
) {
throw new IOException("Cannot read any previous back up timestamps from backup system table. "
+ "In order to create an incremental backup, at least one full backup is needed.");
}
LOG.info("Execute roll log procedure for incremental backup ...");
HashMap<String, String> props = new HashMap<>();
props.put("backupRoot", backupInfo.getBackupRootDir());
try (Admin admin = conn.getAdmin()) {
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
}
newTimestamps = readRegionServerLastLogRollResult();
logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode);
logList = excludeProcV2WALs(logList);
backupInfo.setIncrBackupFileList(logList);
return newTimestamps;
} | 3.68 |
hbase_HFileReaderImpl_getFirstRowKey | /**
* TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to
* eliminate {@link KeyValue} here.
* @return the first row key, or null if the file is empty.
*/
@Override
public Optional<byte[]> getFirstRowKey() {
// We have to copy the row part to form the row key alone
return getFirstKey().map(CellUtil::cloneRow);
} | 3.68 |
flink_FlinkAssertions_chainOfCauses | /**
* You can use this method in combination with {@link
* AbstractThrowableAssert#extracting(Function, AssertFactory)} to perform assertions on a chain
* of causes. For example:
*
* <pre>{@code
* assertThat(throwable)
* .extracting(FlinkAssertions::chainOfCauses, FlinkAssertions.STREAM_THROWABLE)
* }</pre>
*
* @return the list is ordered from the current {@link Throwable} up to the root cause.
*/
public static Stream<Throwable> chainOfCauses(Throwable throwable) {
if (throwable == null) {
return Stream.empty();
}
if (throwable.getCause() == null) {
return Stream.of(throwable);
}
return Stream.concat(Stream.of(throwable), chainOfCauses(throwable.getCause()));
} | 3.68 |
hadoop_IOStatisticsLogging_demandStringifyIOStatisticsSource | /**
* On demand stringifier of an IOStatisticsSource instance.
* <p>
* Whenever this object's toString() method is called, it evaluates the
* statistics.
* <p>
* This is designed to be affordable to use in log statements.
* @param source source of statistics -may be null.
* @return an object whose toString() operation returns the current values.
*/
public static Object demandStringifyIOStatisticsSource(
@Nullable IOStatisticsSource source) {
return new SourceToString(source);
} | 3.68 |
rocketmq-connect_DebeziumOracleConnector_taskClass | /**
* Return the task class for this connector.
*
* @return the task implementation class
*/
@Override
public Class<? extends Task> taskClass() {
return DebeziumOracleSource.class;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_createExecutionInterceptors | /**
* Return a list of execution interceptors for the AWS SDK which
* relays to this class.
* @return a list of execution interceptors.
*/
@Override
public List<ExecutionInterceptor> createExecutionInterceptors()
throws IOException {
// wire up the AWS SDK To call back into this class when
// preparing to make S3 calls.
List<ExecutionInterceptor> executionInterceptors = new ArrayList<>();
executionInterceptors.add(this);
final String handlers = getConfig().getTrimmed(AUDIT_REQUEST_HANDLERS, "");
if (!handlers.isEmpty()) {
// warn and ignore v1 handlers.
V2Migration.v1RequestHandlersUsed(handlers);
}
// V2 SDK supports global/service interceptors, but they need to be configured on the
// classpath and don't get the filesystem/job configuration passed down.
final Class<?>[] interceptors = getConfig().getClasses(AUDIT_EXECUTION_INTERCEPTORS);
if (interceptors != null) {
for (Class<?> handler : interceptors) {
try {
LOG.debug("Adding intercept of class {}", handler);
Constructor<?> ctor = handler.getConstructor();
final ExecutionInterceptor interceptor = (ExecutionInterceptor) ctor.newInstance();
if (interceptor instanceof Configurable) {
// pass in the configuration.
((Configurable) interceptor).setConf(getConfig());
}
executionInterceptors.add(interceptor);
} catch (ExceptionInInitializerError e) {
throw FutureIO.unwrapInnerException(e);
} catch (Exception e) {
throw new IOException(e);
}
}
}
return executionInterceptors;
} | 3.68 |
hadoop_ProducerConsumer_addWorker | /**
* Add another worker that will consume WorkRequest{@literal <T>} items
* from input queue, process each item using supplied processor, and for
* every processed item output WorkReport{@literal <R>} to output queue.
*
* @param processor Processor implementing WorkRequestProcessor interface.
*
*/
public void addWorker(WorkRequestProcessor<T, R> processor) {
executor.execute(new Worker(processor));
} | 3.68 |
framework_UIDL_getDoubleVariable | /**
* Gets the value of the named variable.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public double getDoubleVariable(String name) {
return var().getRawNumber(name);
} | 3.68 |
dubbo_ServiceBean_getService | /**
* Gets associated {@link Service}
*
* @return associated {@link Service}
*/
public Service getService() {
return service;
} | 3.68 |
hbase_DynamicMetricsRegistry_getGauge | /**
* Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it.
* @param gaugeName name of the gauge to create or get.
* @param potentialStartingValue value of the new gauge if we have to create it.
*/
public MutableGaugeLong getGauge(String gaugeName, long potentialStartingValue) {
// Try and get the gauge.
MutableMetric metric = metricsMap.get(gaugeName);
// If it's not there then try and put a new one in the storage.
if (metric == null) {
// Create the potential new gauge.
MutableGaugeLong newGauge =
new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue);
// Try and put the gauge in. This is atomic.
metric = metricsMap.putIfAbsent(gaugeName, newGauge);
// If the value we get back is null then the put was successful and we will return the new gauge;
// otherwise metric holds whatever was already registered before our put completed.
if (metric == null) {
return newGauge;
}
}
if (!(metric instanceof MutableGaugeLong)) {
throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName
+ " and not of type MetricMutableGaugeLong");
}
return (MutableGaugeLong) metric;
} | 3.68 |
flink_LocatableInputSplitAssigner_getNextUnassignedMinLocalCountSplit | /**
* Retrieves a LocatableInputSplit with minimum local count. InputSplits which have already
* been assigned (i.e., which are not contained in the provided set) are filtered out. The
* returned input split is NOT removed from the provided set.
*
* @param unassignedSplits Set of unassigned input splits.
* @return An input split with minimum local count or null if all splits have been assigned.
*/
public LocatableInputSplitWithCount getNextUnassignedMinLocalCountSplit(
Set<LocatableInputSplitWithCount> unassignedSplits) {
if (splits.size() == 0) {
return null;
}
do {
elementCycleCount--;
// take first split of the list
LocatableInputSplitWithCount split = splits.pollFirst();
if (unassignedSplits.contains(split)) {
int localCount = split.getLocalCount();
// still unassigned, check local count
if (localCount > minLocalCount) {
// re-insert at end of the list and continue to look for split with smaller
// local count
splits.offerLast(split);
// check and update second smallest local count
if (nextMinLocalCount == -1 || split.getLocalCount() < nextMinLocalCount) {
nextMinLocalCount = split.getLocalCount();
}
split = null;
}
} else {
// split was already assigned
split = null;
}
if (elementCycleCount == 0) {
// one full cycle, but no split with min local count found
// update minLocalCnt and element cycle count for next pass over the splits
minLocalCount = nextMinLocalCount;
nextMinLocalCount = -1;
elementCycleCount = splits.size();
}
if (split != null) {
// found a split to assign
return split;
}
} while (elementCycleCount > 0);
// no split left
return null;
} | 3.68 |
framework_RendererCellReference_set | /**
* Sets the identifying information for this cell.
*
* @param cell
* the flyweight cell to reference
* @param columnIndex
* the index of the column in the grid, including hidden cells
* @param column
* the column to reference
*/
public void set(FlyweightCell cell, int columnIndex,
Grid.Column<?, ?> column) {
this.cell = cell;
super.set(cell.getColumn(), columnIndex,
(Grid.Column<?, Object>) column);
} | 3.68 |
hbase_CompactingMemStore_stopCompaction | /**
* Requests cancellation of the asynchronous compaction task (caused by in-memory flush). The
* compaction may still happen if the request was sent too late. This is a non-blocking request.
*/
private void stopCompaction() {
if (inMemoryCompactionInProgress.get()) {
compactor.stop();
}
} | 3.68 |
framework_FormLayout_getExpandRatio | /**
* @deprecated This method currently has no effect as expand ratios are not
* implemented in FormLayout
*/
@Override
@Deprecated
public float getExpandRatio(Component component) {
return super.getExpandRatio(component);
} | 3.68 |
hbase_RegionCoprocessorHost_postFlush | /**
* Invoked after a memstore flush
*/
public void postFlush(HStore store, HStoreFile storeFile, FlushLifeCycleTracker tracker)
throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postFlush(this, store, storeFile, tracker);
}
});
} | 3.68 |
hadoop_StageConfig_getName | /**
* Get name of task/job.
* @return name for logging.
*/
public String getName() {
return name;
} | 3.68 |
flink_PendingCheckpoint_setCancellerHandle | /**
* Sets the handle for the canceller to this pending checkpoint. This method fails with an
* exception if a handle has already been set.
*
* @return true, if the handle was set, false, if the checkpoint is already disposed;
*/
public boolean setCancellerHandle(ScheduledFuture<?> cancellerHandle) {
synchronized (lock) {
if (this.cancellerHandle == null) {
if (!disposed) {
this.cancellerHandle = cancellerHandle;
return true;
} else {
return false;
}
} else {
throw new IllegalStateException("A canceller handle was already set");
}
}
} | 3.68 |
hadoop_FileIoProvider_listDirectory | /**
* Get a listing of the given directory using
* {@link IOUtils#listDirectory(File, FilenameFilter)}.
*
* @param volume target volume. null if unavailable.
* @param dir Directory to list.
* @param filter {@link FilenameFilter} to filter the directory entries.
* @throws IOException
*/
public List<String> listDirectory(
@Nullable FsVolumeSpi volume, File dir,
FilenameFilter filter) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, LIST);
try {
faultInjectorEventHook.beforeMetadataOp(volume, LIST);
List<String> children = IOUtils.listDirectory(dir, filter);
profilingEventHook.afterMetadataOp(volume, LIST, begin);
return children;
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_FileChannelMemoryMappedBoundedData_finishWrite | /**
* Finishes the current region and prevents further writes. After calling this method, further
* calls to {@link #writeBuffer(Buffer)} will fail.
*/
@Override
public void finishWrite() throws IOException {
mapRegionAndStartNext();
fileChannel.close();
} | 3.68 |
hadoop_AbfsHttpOperation_parseListFilesResponse | /**
* Parse the list file response
*
* @param stream InputStream contains the list results.
* @throws IOException
*/
private void parseListFilesResponse(final InputStream stream) throws IOException {
if (stream == null) {
return;
}
if (listResultSchema != null) {
// already parsed the response
return;
}
try {
final ObjectMapper objectMapper = new ObjectMapper();
this.listResultSchema = objectMapper.readValue(stream, ListResultSchema.class);
} catch (IOException ex) {
LOG.error("Unable to deserialize list results", ex);
throw ex;
}
} | 3.68 |
framework_VFilterSelect_getStyle | /**
* Gets the style set for this suggestion item. Styles are typically set
* by a server-side {@link com.vaadin.ui.ComboBox.ItemStyleGenerator}.
* The returned style is prefixed by <code>v-filterselect-item-</code>.
*
* @since 7.5.6
* @return the style name to use, or <code>null</code> to not apply any
* custom style.
*/
public String getStyle() {
return style;
} | 3.68 |
hbase_Query_getConsistency | /**
* Returns the consistency level for this operation
* @return the consistency level
*/
public Consistency getConsistency() {
return consistency;
} | 3.68 |
hbase_HFileBlockIndex_getCacheOnWrite | /**
* @return true if we are using cache-on-write. This is configured by the caller of the
* constructor by either passing a valid block cache or null.
*/
@Override
public boolean getCacheOnWrite() {
return cacheConf != null && cacheConf.shouldCacheIndexesOnWrite();
} | 3.68 |
hadoop_BlockPoolTokenSecretManager_checkAccess | /**
* See {@link BlockTokenSecretManager#checkAccess(Token, String,
* ExtendedBlock, BlockTokenIdentifier.AccessMode,
* StorageType[], String[])}
*/
public void checkAccess(Token<BlockTokenIdentifier> token,
String userId, ExtendedBlock block, AccessMode mode,
StorageType[] storageTypes, String[] storageIds)
throws InvalidToken {
get(block.getBlockPoolId()).checkAccess(token, userId, block, mode,
storageTypes, storageIds);
} | 3.68 |
hbase_FileLink_getBackReferencesDir | /**
* Get the directory to store the link back references
* <p>
* To simplify the reference count process, during the FileLink creation a back-reference is added
* to the back-reference directory of the specified file.
* @param storeDir Root directory for the link reference folder
* @param fileName File Name with links
* @return Path for the link back references.
*/
public static Path getBackReferencesDir(final Path storeDir, final String fileName) {
return new Path(storeDir, BACK_REFERENCES_DIRECTORY_PREFIX + fileName);
} | 3.68 |
querydsl_StringExpression_equalsIgnoreCase | /**
* Create a {@code this.equalsIgnoreCase(str)} expression
*
* <p>Compares this {@code StringExpression} to another {@code StringExpression}, ignoring case
* considerations.</p>
*
* @param str string
* @return this.equalsIgnoreCase(str)
* @see java.lang.String#equalsIgnoreCase(String)
*/
public BooleanExpression equalsIgnoreCase(String str) {
return equalsIgnoreCase(ConstantImpl.create(str));
} | 3.68 |
framework_SQLContainer_setPageLengthInternal | /**
* Sets the page length internally, without refreshing the container.
*
* @param pageLength
* the new page length
*/
private void setPageLengthInternal(int pageLength) {
this.pageLength = pageLength > 0 ? pageLength : DEFAULT_PAGE_LENGTH;
cacheOverlap = getPageLength();
cachedItems.setCacheLimit(CACHE_RATIO * getPageLength() + cacheOverlap);
} | 3.68 |
framework_ContainerOrderedWrapper_updateOrderWrapper | /**
* Updates the wrapper's internal ordering information to include all Items
* in the underlying container.
* <p>
* Note : If the contents of the wrapped container change without the
* wrapper's knowledge, this method needs to be called to update the
* ordering information of the Items.
* </p>
*/
public void updateOrderWrapper() {
if (!ordered) {
final Collection<?> ids = container.getItemIds();
// Recreates ordering if some parts of it are missing
if (next == null || first == null || last == null || prev == null) {
first = null;
last = null;
next = new Hashtable<Object, Object>();
prev = new Hashtable<Object, Object>();
}
// Filter out all the missing items
final LinkedList<?> l = new LinkedList<Object>(next.keySet());
for (final Object id : l) {
if (!container.containsId(id)) {
removeFromOrderWrapper(id);
}
}
// Adds missing items
for (final Object id : ids) {
if (!next.containsKey(id) && last != id) {
addToOrderWrapper(id);
}
}
}
} | 3.68 |
hadoop_SessionTokenIdentifier_getMarshalledCredentials | /**
* Get the marshalled credentials.
* @return marshalled AWS credentials.
*/
public MarshalledCredentials getMarshalledCredentials() {
return marshalledCredentials;
} | 3.68 |
hadoop_ResourceRequestSetKey_extractMatchingKey | /**
* Extract the corresponding ResourceRequestSetKey for an allocated container
* from a given set. Return null if not found.
*
* @param container the allocated container
* @param keys the set of keys to look from
* @return ResourceRequestSetKey
*/
public static ResourceRequestSetKey extractMatchingKey(Container container,
Set<ResourceRequestSetKey> keys) {
ResourceRequestSetKey resourceRequestSetKey = new ResourceRequestSetKey(
container.getAllocationRequestId(), container.getPriority(),
container.getResource(), container.getExecutionType());
if (keys.contains(resourceRequestSetKey)) {
return resourceRequestSetKey;
}
if (container.getAllocationRequestId() > 0) {
// If no exact match, look for the one with the same (non-zero)
// allocationRequestId
for (ResourceRequestSetKey candidate : keys) {
if (candidate.getAllocationRequestId() == container.getAllocationRequestId()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using possible match for {} : {}", resourceRequestSetKey, candidate);
}
return candidate;
}
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("not match found for container {}.", container.getId());
for (ResourceRequestSetKey candidate : keys) {
LOG.debug("candidate set keys: {}.", candidate.toString());
}
}
return null;
} | 3.68 |
hbase_MasterCoprocessorHost_preSplitRegionAction | /**
* Invoked just before a split
* @param tableName the table where the region belongs to
* @param splitRow the split point
* @param user the user
*/
public void preSplitRegionAction(final TableName tableName, final byte[] splitRow,
final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.preSplitRegionAction(this, tableName, splitRow);
}
});
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_getInstance | /**
* Get instance of job registry.
*
* @return instance of job registry
*/
public static JobRegistry getInstance() {
if (null == instance) {
synchronized (JobRegistry.class) {
if (null == instance) {
instance = new JobRegistry();
}
}
}
return instance;
} | 3.68 |
shardingsphere-elasticjob_QueryParameterMap_toSingleValueMap | /**
* Convert to a single-value map, keeping only the first value of each parameter.
*
* @return single value map
*/
public Map<String, String> toSingleValueMap() {
return queryMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().get(0)));
} | 3.68 |
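
The same collapsing logic, sketched with plain JDK collections (a hypothetical demo, independent of the ElasticJob class):

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class SingleValueMapDemo {
    public static void main(String[] args) {
        Map<String, List<String>> queryMap = new LinkedHashMap<>();
        queryMap.put("status", Arrays.asList("RUNNING", "FAILED"));
        queryMap.put("page", Arrays.asList("1"));
        // Keep only the first value of each parameter, as toSingleValueMap does above.
        Map<String, String> single = queryMap.entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().get(0)));
        System.out.println(single); // e.g. {page=1, status=RUNNING} (iteration order not guaranteed)
    }
}
```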
hbase_WALKeyImpl_getEncodedRegionName | /** Returns encoded region name */
@Override
public byte[] getEncodedRegionName() {
return encodedRegionName;
} | 3.68 |
hadoop_RouterRMAdminService_initializePipeline | /**
* Initializes the request interceptor pipeline for the specified user.
*
* @param user
*/
private RequestInterceptorChainWrapper initializePipeline(String user) {
synchronized (this.userPipelineMap) {
if (this.userPipelineMap.containsKey(user)) {
LOG.info("Request to start an already existing user: {}"
+ " was received, so ignoring.", user);
return userPipelineMap.get(user);
}
RequestInterceptorChainWrapper chainWrapper =
new RequestInterceptorChainWrapper();
try {
// We should init the pipeline instance after it is created and then
// add to the map, to ensure thread safe.
LOG.info("Initializing request processing pipeline for user: {}.", user);
RMAdminRequestInterceptor interceptorChain =
this.createRequestInterceptorChain();
interceptorChain.init(user);
chainWrapper.init(interceptorChain);
} catch (Exception e) {
LOG.error("Init RMAdminRequestInterceptor error for user: {}.", user, e);
throw e;
}
this.userPipelineMap.put(user, chainWrapper);
return chainWrapper;
}
} | 3.68 |
flink_CheckpointConfig_getMinPauseBetweenCheckpoints | /**
* Gets the minimal pause between checkpointing attempts. This setting defines how soon the
* checkpoint coordinator may trigger another checkpoint after it becomes possible to trigger
* another checkpoint with respect to the maximum number of concurrent checkpoints (see {@link
* #getMaxConcurrentCheckpoints()}).
*
* @return The minimal pause before the next checkpoint is triggered.
*/
public long getMinPauseBetweenCheckpoints() {
return configuration
.get(ExecutionCheckpointingOptions.MIN_PAUSE_BETWEEN_CHECKPOINTS)
.toMillis();
} | 3.68 |
flink_TSetClientInfoResp_findByThriftIdOrThrow | /**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new java.lang.IllegalArgumentException(
"Field " + fieldId + " doesn't exist!");
return fields;
} | 3.68 |
graphhopper_Service_checkOverlap | /**
* Checks for overlapping days of week between two service calendars
* @param s1
* @param s2
* @return true if both calendars simultaneously operate on at least one day of the week
*/
public static boolean checkOverlap (Service s1, Service s2) {
if (s1.calendar == null || s2.calendar == null) {
return false;
}
// overlap exists if at least one day of week is shared by two calendars
boolean overlappingDays = s1.calendar.monday == 1 && s2.calendar.monday == 1 ||
s1.calendar.tuesday == 1 && s2.calendar.tuesday == 1 ||
s1.calendar.wednesday == 1 && s2.calendar.wednesday == 1 ||
s1.calendar.thursday == 1 && s2.calendar.thursday == 1 ||
s1.calendar.friday == 1 && s2.calendar.friday == 1 ||
s1.calendar.saturday == 1 && s2.calendar.saturday == 1 ||
s1.calendar.sunday == 1 && s2.calendar.sunday == 1;
return overlappingDays;
} | 3.68 |
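
The same day-of-week overlap test can be expressed more compactly with EnumSet (a hypothetical restatement, assuming the calendar flags were collected into sets beforehand):

```java
import java.time.DayOfWeek;
import java.util.EnumSet;

public class OverlapDemo {

    // True if the two services share at least one operating day.
    static boolean overlaps(EnumSet<DayOfWeek> days1, EnumSet<DayOfWeek> days2) {
        EnumSet<DayOfWeek> shared = EnumSet.copyOf(days1);
        shared.retainAll(days2);
        return !shared.isEmpty();
    }

    public static void main(String[] args) {
        EnumSet<DayOfWeek> weekdayService = EnumSet.range(DayOfWeek.MONDAY, DayOfWeek.FRIDAY);
        EnumSet<DayOfWeek> weekendService = EnumSet.of(DayOfWeek.SATURDAY, DayOfWeek.SUNDAY);
        System.out.println(overlaps(weekdayService, weekendService)); // false
        System.out.println(overlaps(weekdayService, EnumSet.of(DayOfWeek.FRIDAY, DayOfWeek.SATURDAY))); // true
    }
}
```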
dubbo_BaseServiceMetadata_revertDisplayServiceKey | /**
* revert of org.apache.dubbo.common.ServiceDescriptor#getDisplayServiceKey()
*
* @param displayKey
* @return
*/
public static BaseServiceMetadata revertDisplayServiceKey(String displayKey) {
String[] eles = StringUtils.split(displayKey, COLON_SEPARATOR);
if (eles == null || eles.length < 1 || eles.length > 2) {
return new BaseServiceMetadata();
}
BaseServiceMetadata serviceDescriptor = new BaseServiceMetadata();
serviceDescriptor.setServiceInterfaceName(eles[0]);
if (eles.length == 2) {
serviceDescriptor.setVersion(eles[1]);
}
return serviceDescriptor;
} | 3.68 |
dubbo_FrameworkModel_getAllApplicationModels | /**
* Get all application models including the internal application model.
*/
public List<ApplicationModel> getAllApplicationModels() {
synchronized (globalLock) {
return Collections.unmodifiableList(applicationModels);
}
} | 3.68 |
flink_SqlLikeChainChecker_checkEnd | /** Matches the ending of each string to its pattern. */
private static boolean checkEnd(
BinaryStringData pattern, MemorySegment[] segments, int start, int len) {
int lenSub = pattern.getSizeInBytes();
return len >= lenSub
&& SegmentsUtil.equals(
pattern.getSegments(), 0, segments, start + len - lenSub, lenSub);
} | 3.68 |
flink_ZooKeeperUtils_useNamespaceAndEnsurePath | /**
* Returns a facade of the client that uses the specified namespace, and ensures that all nodes
* in the path exist.
*
* @param client ZK client
* @param path the new namespace
* @return ZK Client that uses the new namespace
* @throws Exception ZK errors
*/
public static CuratorFramework useNamespaceAndEnsurePath(
final CuratorFramework client, final String path) throws Exception {
checkNotNull(client, "client must not be null");
checkNotNull(path, "path must not be null");
// Ensure that the checkpoints path exists
client.newNamespaceAwareEnsurePath(path).ensure(client.getZookeeperClient());
// All operations will have the path as root
final String newNamespace = generateZookeeperPath(client.getNamespace(), path);
return client.usingNamespace(
// Curator prepends a '/' manually and throws an Exception if the
// namespace starts with a '/'.
trimStartingSlash(newNamespace));
} | 3.68 |
flink_ProjectOperator_projectTuple18 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>
ProjectOperator<
T,
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>
projectTuple18() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>
tType =
new TupleTypeInfo<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>(fTypes);
return new ProjectOperator<
T,
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
morf_SqlUtils_field | /**
* Constructs a new field with a given name
*
* <p>Consider using {@link TableReference#field(String)} instead.</p>
*
* @param fieldName the name of the field
* @return {@link FieldReference}
* @see TableReference#field(String)
*/
public static FieldReference field(String fieldName) {
return new FieldReference(fieldName);
} | 3.68 |