name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hbase_ColumnSchemaModel___setBlockcache | /**
* @param value the desired value of the BLOCKCACHE attribute
*/
public void __setBlockcache(boolean value) {
attrs.put(BLOCKCACHE, Boolean.toString(value));
} | 3.68 |
framework_FocusUtil_setTabIndex | /**
* Sets the widget's position in the tab index. If more than one widget has
* the same tab index, each such widget will receive focus in an arbitrary
* order. Setting the tab index to <code>-1</code> will cause the widget to
* be removed from the tab order.
*
* @param focusable
* The widget
* @param tabIndex
* the widget's tab index
*/
public static void setTabIndex(Widget focusable, int tabIndex) {
assert (focusable != null && focusable
.getElement() != null) : "Can't setTabIndex for a widget without an element";
focusable.getElement().setTabIndex(tabIndex);
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_getDefaultExprProcessor | /** Factory method to get DefaultExprProcessor. */
public HiveParserTypeCheckProcFactory.DefaultExprProcessor getDefaultExprProcessor() {
return new HiveParserTypeCheckProcFactory.DefaultExprProcessor();
} | 3.68 |
flink_HiveServer2Endpoint_waitUntilOperationIsTerminated | /**
 * A solution similar to {@code
* org.apache.hive.jdbc.HiveStatement#waitForOperationToComplete}.
*
* <p>The better solution is to introduce an interface similar to {@link TableResult#await()}.
*/
private void waitUntilOperationIsTerminated(
SessionHandle sessionHandle, OperationHandle operationHandle) throws Exception {
OperationInfo info;
do {
info = service.getOperationInfo(sessionHandle, operationHandle);
switch (info.getStatus()) {
case INITIALIZED:
case PENDING:
case RUNNING:
Thread.sleep(CHECK_INTERVAL_MS);
break;
case CANCELED:
case TIMEOUT:
throw new SqlGatewayException(
String.format(
"The operation %s's status is %s for the session %s.",
operationHandle, info.getStatus(), sessionHandle));
case ERROR:
throw new SqlGatewayException(
String.format(
"The operation %s's status is %s for the session %s.",
operationHandle, info.getStatus(), sessionHandle),
info.getException()
.orElseThrow(
() ->
new SqlGatewayException(
"Impossible! ERROR status should contains the error.")));
case FINISHED:
return;
default:
throw new SqlGatewayException(
String.format("Unknown status: %s.", info.getStatus()));
}
} while (true);
} | 3.68 |
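The method above implements a generic wait-until-terminal polling pattern; its javadoc notes that an interface like TableResult#await() would be a cleaner alternative. Below is a minimal, self-contained sketch of that pattern with hypothetical names and statuses, not Flink's actual gateway API.

```java
import java.util.function.Supplier;

/** Generic poll-until-terminal helper; names and statuses are illustrative only. */
final class PollUntilTerminal {

    enum Status { RUNNING, FINISHED, ERROR }

    /** Polls the supplier until it reports FINISHED, failing fast on ERROR. */
    static void await(Supplier<Status> statusSupplier, long intervalMs) throws Exception {
        while (true) {
            Status status = statusSupplier.get();
            switch (status) {
                case RUNNING:
                    Thread.sleep(intervalMs); // back off before the next check
                    break;
                case ERROR:
                    throw new IllegalStateException("Operation failed");
                case FINISHED:
                    return;
            }
        }
    }
}
```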
morf_UpdateStatementBuilder_getFields | /**
* Gets the list of fields
*
* @return the fields
*/
List<AliasedField> getFields() {
return fields;
} | 3.68 |
hadoop_RollingWindowAverage_cleanupOldPoints | /**
* Clean up points that don't count any more (are before our
* rolling window) from our current queue of points.
*/
private void cleanupOldPoints() {
Date cutoffTime = new Date(new Date().getTime() - windowSizeMs);
while (!currentPoints.isEmpty()
&& currentPoints.peekFirst().getEventTime().before(cutoffTime)) {
currentPoints.removeFirst();
}
} | 3.68 |
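The cleanup method above only evicts expired points; a complete rolling-window average also needs to record points and compute the mean over what remains. The sketch below shows one way the pieces typically fit together, using illustrative class and field names rather than Hadoop's actual ones.

```java
import java.util.ArrayDeque;
import java.util.Deque;

/** Minimal rolling-window average sketch; class and field names are illustrative, not Hadoop's. */
class SimpleRollingWindowAverage {
    private static final class DataPoint {
        final long eventTimeMs;
        final long value;
        DataPoint(long eventTimeMs, long value) { this.eventTimeMs = eventTimeMs; this.value = value; }
    }

    private final Deque<DataPoint> currentPoints = new ArrayDeque<>();
    private final long windowSizeMs;

    SimpleRollingWindowAverage(long windowSizeMs) { this.windowSizeMs = windowSizeMs; }

    synchronized void addPoint(long value) {
        currentPoints.addLast(new DataPoint(System.currentTimeMillis(), value));
    }

    synchronized long getCurrentAverage() {
        cleanupOldPoints();
        if (currentPoints.isEmpty()) {
            return 0;
        }
        long sum = 0;
        for (DataPoint p : currentPoints) {
            sum += p.value;
        }
        return sum / currentPoints.size();
    }

    /** Same idea as the method above: drop points that fell out of the window. */
    private void cleanupOldPoints() {
        long cutoff = System.currentTimeMillis() - windowSizeMs;
        while (!currentPoints.isEmpty() && currentPoints.peekFirst().eventTimeMs < cutoff) {
            currentPoints.removeFirst();
        }
    }
}
```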
hadoop_LoggedJob_getJobProperties | /**
* Get the configuration properties of the job.
*/
public JobProperties getJobProperties() {
return jobProperties;
} | 3.68 |
AreaShop_SignLinkerManager_isInSignLinkMode | /**
* Check if the player is in sign linking mode.
* @param player The player to check
* @return true if the player is in sign linking mode, otherwise false
*/
public boolean isInSignLinkMode(Player player) {
return signLinkers.containsKey(player.getUniqueId());
} | 3.68 |
framework_StaticSection_removeColumn | /**
* Removes the cell corresponding to the given column id.
*
* @param columnId
* the id of the column whose cell to remove
*/
public void removeColumn(String columnId) {
for (ROW row : rows) {
row.removeCell(columnId);
}
markAsDirty();
} | 3.68 |
shardingsphere-elasticjob_ShardingService_getShardingItems | /**
* Get sharding items.
*
* @param jobInstanceId job instance ID
* @return sharding items
*/
public List<Integer> getShardingItems(final String jobInstanceId) {
JobInstance jobInstance = YamlEngine.unmarshal(jobNodeStorage.getJobNodeData(instanceNode.getInstancePath(jobInstanceId)), JobInstance.class);
if (!serverService.isAvailableServer(jobInstance.getServerIp())) {
return Collections.emptyList();
}
List<Integer> result = new LinkedList<>();
int shardingTotalCount = configService.load(true).getShardingTotalCount();
for (int i = 0; i < shardingTotalCount; i++) {
if (jobInstance.getJobInstanceId().equals(jobNodeStorage.getJobNodeData(ShardingNode.getInstanceNode(i)))) {
result.add(i);
}
}
return result;
} | 3.68 |
hadoop_TimelinePutResponse_setEntityType | /**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
} | 3.68 |
graphhopper_GHPoint_toGeoJson | /**
* Attention: geoJson is LON,LAT
*/
public Double[] toGeoJson() {
return new Double[]{lon, lat};
} | 3.68 |
hadoop_AbfsInputStream_seek | /**
* Seek to given position in stream.
* @param n position to seek to
* @throws IOException if there is an error
* @throws EOFException if attempting to seek past end of file
*/
@Override
public synchronized void seek(long n) throws IOException {
LOG.debug("requested seek to position {}", n);
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
if (n < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
}
if (n > contentLength) {
throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
}
if (streamStatistics != null) {
streamStatistics.seek(n, fCursor);
}
// next read will read from here
nextReadPos = n;
LOG.debug("set nextReadPos to {}", nextReadPos);
} | 3.68 |
framework_AbstractInMemoryContainer_hasContainerFilters | /**
* Returns true if any filters have been applied to the container.
*
* @return true if the container has filters applied, false otherwise
* @since 7.1
*/
protected boolean hasContainerFilters() {
return !getContainerFilters().isEmpty();
} | 3.68 |
hudi_BufferedRandomAccessFile_spaceAvailableInBuffer | /**
* @return - whether space is available at the end of the buffer.
*/
private boolean spaceAvailableInBuffer() {
return (this.isEOF && (this.validLastPosition < this.endPosition()));
} | 3.68 |
dubbo_DubboBeanUtils_registerCommonBeans | /**
* Register the common beans
*
* @param registry {@link BeanDefinitionRegistry}
* @see ReferenceAnnotationBeanPostProcessor
* @see DubboConfigDefaultPropertyValueBeanPostProcessor
* @see DubboConfigAliasPostProcessor
*/
static void registerCommonBeans(BeanDefinitionRegistry registry) {
registerInfrastructureBean(registry, ServicePackagesHolder.BEAN_NAME, ServicePackagesHolder.class);
registerInfrastructureBean(registry, ReferenceBeanManager.BEAN_NAME, ReferenceBeanManager.class);
// Since 2.5.7 Register @Reference Annotation Bean Processor as an infrastructure Bean
registerInfrastructureBean(
registry, ReferenceAnnotationBeanPostProcessor.BEAN_NAME, ReferenceAnnotationBeanPostProcessor.class);
// TODO Whether DubboConfigAliasPostProcessor can be removed ?
// Since 2.7.4 [Feature] https://github.com/apache/dubbo/issues/5093
registerInfrastructureBean(
registry, DubboConfigAliasPostProcessor.BEAN_NAME, DubboConfigAliasPostProcessor.class);
// register ApplicationListeners
registerInfrastructureBean(
registry, DubboDeployApplicationListener.class.getName(), DubboDeployApplicationListener.class);
registerInfrastructureBean(
registry, DubboConfigApplicationListener.class.getName(), DubboConfigApplicationListener.class);
// Since 2.7.6 Register DubboConfigDefaultPropertyValueBeanPostProcessor as an infrastructure Bean
registerInfrastructureBean(
registry,
DubboConfigDefaultPropertyValueBeanPostProcessor.BEAN_NAME,
DubboConfigDefaultPropertyValueBeanPostProcessor.class);
// Dubbo config initializer
registerInfrastructureBean(registry, DubboConfigBeanInitializer.BEAN_NAME, DubboConfigBeanInitializer.class);
// register infra bean if not exists later
registerInfrastructureBean(
registry, DubboInfraBeanRegisterPostProcessor.BEAN_NAME, DubboInfraBeanRegisterPostProcessor.class);
} | 3.68 |
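The registerInfrastructureBean helper called throughout the method above is not shown in this snippet. The following is a hedged sketch of how such a helper is commonly written against the plain Spring BeanDefinitionRegistry API; it is an assumption about the helper's shape, not Dubbo's actual implementation.

```java
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;

final class InfrastructureBeanSupport {

    /** Registers the bean only once and marks it as framework infrastructure. */
    static boolean registerInfrastructureBean(BeanDefinitionRegistry registry,
                                              String beanName,
                                              Class<?> beanType) {
        if (registry.containsBeanDefinition(beanName)) {
            return false; // already registered, nothing to do
        }
        RootBeanDefinition beanDefinition = new RootBeanDefinition(beanType);
        beanDefinition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
        registry.registerBeanDefinition(beanName, beanDefinition);
        return true;
    }
}
```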
morf_Function_dateToYyyymmdd | /**
* Helper method to create an instance of the "DATE_TO_YYYYMMDD" SQL function.
* {@code expression} must result in a string.
*
* @see Cast
* @param expression the expression to evaluate
* @return an instance of the DATE_TO_YYYYMMDD function
*/
public static Function dateToYyyymmdd(AliasedField expression) {
return new Function(FunctionType.DATE_TO_YYYYMMDD, expression);
} | 3.68 |
hbase_HRegion_getBaseConf | /**
* A split takes the config from the parent region & passes it to the daughter region's
* constructor. If 'conf' was passed, you would end up using the HTD of the parent region in
* addition to the new daughter HTD. Pass 'baseConf' to the daughter regions to avoid this tricky
* dedupe problem.
* @return Configuration object
*/
Configuration getBaseConf() {
return this.baseConf;
} | 3.68 |
flink_TableConfig_addConfiguration | /**
* Adds the given key-value configuration to the underlying application-specific configuration.
* It overwrites existing keys.
*
* @param configuration key-value configuration to be added
*/
public void addConfiguration(Configuration configuration) {
Preconditions.checkNotNull(configuration);
this.configuration.addAll(configuration);
} | 3.68 |
graphhopper_CHMeasurement_testPerformanceAutomaticNodeOrdering | /**
 * Parses a given osm file, contracts the graph and runs random routing queries on it. This is useful to test
 * the node contraction heuristics with regard to the performance of the automatic graph contraction (the node
 * contraction order determines how many and which shortcuts will be introduced) and the resulting query speed.
 * The queries are compared with a normal AStar search to ensure correctness.
*/
private static void testPerformanceAutomaticNodeOrdering(String[] args) {
// example args:
 // map=berlin.pbf stats_file=stats.dat period_updates=0 lazy_updates=100 neighbor_updates=50 max_neighbor_updates=3 contract_nodes=100 log_messages=20 edge_quotient_weight=100.0 orig_edge_quotient_weight=100.0 hierarchy_depth_weight=20.0 landmarks=0 cleanup=true turncosts=true threshold=0.1 seed=456 comp_iterations=10 perf_iterations=100 quick=false
long start = nanoTime();
PMap map = PMap.read(args);
GraphHopperConfig ghConfig = new GraphHopperConfig(map);
LOGGER.info("Running analysis with parameters {}", ghConfig);
String osmFile = ghConfig.getString("map", "map-matching/files/leipzig_germany.osm.pbf");
ghConfig.putObject("datareader.file", osmFile);
final String statsFile = ghConfig.getString("stats_file", null);
final int periodicUpdates = ghConfig.getInt("period_updates", 0);
final int lazyUpdates = ghConfig.getInt("lazy_updates", 100);
final int neighborUpdates = ghConfig.getInt("neighbor_updates", 50);
final int maxNeighborUpdates = ghConfig.getInt("max_neighbor_updates", 3);
final int contractedNodes = ghConfig.getInt("contract_nodes", 100);
final int logMessages = ghConfig.getInt("log_messages", 20);
final float edgeQuotientWeight = ghConfig.getFloat("edge_quotient_weight", 100.0f);
final float origEdgeQuotientWeight = ghConfig.getFloat("orig_edge_quotient_weight", 100.0f);
final float hierarchyDepthWeight = ghConfig.getFloat("hierarchy_depth_weight", 20.0f);
final int pollFactorHeuristic = ghConfig.getInt("poll_factor_heur", 5);
final int pollFactorContraction = ghConfig.getInt("poll_factor_contr", 200);
final int landmarks = ghConfig.getInt("landmarks", 0);
final boolean cleanup = ghConfig.getBool("cleanup", true);
final boolean withTurnCosts = ghConfig.getBool("turncosts", true);
final int uTurnCosts = ghConfig.getInt(Parameters.Routing.U_TURN_COSTS, 80);
final double errorThreshold = ghConfig.getDouble("threshold", 0.1);
final long seed = ghConfig.getLong("seed", 456);
final int compIterations = ghConfig.getInt("comp_iterations", 100);
final int perfIterations = ghConfig.getInt("perf_iterations", 1000);
final boolean quick = ghConfig.getBool("quick", false);
final GraphHopper graphHopper = new GraphHopper();
String profile = "car_profile";
if (withTurnCosts) {
ghConfig.putObject("graph.vehicles", "car|turn_costs=true");
ghConfig.setProfiles(Collections.singletonList(
new Profile(profile).setVehicle("car").setTurnCosts(true).putHint(Parameters.Routing.U_TURN_COSTS, uTurnCosts)
));
ghConfig.setCHProfiles(Collections.singletonList(
new CHProfile(profile)
));
if (landmarks > 0) {
ghConfig.setLMProfiles(Collections.singletonList(
new LMProfile(profile)
));
ghConfig.putObject("prepare.lm.landmarks", landmarks);
}
} else {
ghConfig.putObject("graph.vehicles", "car");
ghConfig.setProfiles(Collections.singletonList(
new Profile(profile).setVehicle("car").setTurnCosts(false)
));
}
ghConfig.putObject(PERIODIC_UPDATES, periodicUpdates);
ghConfig.putObject(LAST_LAZY_NODES_UPDATES, lazyUpdates);
ghConfig.putObject(NEIGHBOR_UPDATES, neighborUpdates);
ghConfig.putObject(NEIGHBOR_UPDATES_MAX, maxNeighborUpdates);
ghConfig.putObject(CONTRACTED_NODES, contractedNodes);
ghConfig.putObject(LOG_MESSAGES, logMessages);
if (withTurnCosts) {
ghConfig.putObject(EDGE_QUOTIENT_WEIGHT, edgeQuotientWeight);
ghConfig.putObject(ORIGINAL_EDGE_QUOTIENT_WEIGHT, origEdgeQuotientWeight);
ghConfig.putObject(HIERARCHY_DEPTH_WEIGHT, hierarchyDepthWeight);
ghConfig.putObject(MAX_POLL_FACTOR_HEURISTIC_EDGE, pollFactorHeuristic);
ghConfig.putObject(MAX_POLL_FACTOR_CONTRACTION_EDGE, pollFactorContraction);
} else {
ghConfig.putObject(MAX_POLL_FACTOR_HEURISTIC_NODE, pollFactorHeuristic);
ghConfig.putObject(MAX_POLL_FACTOR_CONTRACTION_NODE, pollFactorContraction);
}
LOGGER.info("Initializing graph hopper with args: {}", ghConfig);
graphHopper.init(ghConfig);
if (cleanup) {
graphHopper.clean();
}
PMap results = new PMap(ghConfig.asPMap());
StopWatch sw = new StopWatch();
sw.start();
graphHopper.importOrLoad();
sw.stop();
results.putObject("_prepare_time", sw.getSeconds());
LOGGER.info("Import and preparation took {}s", sw.getMillis() / 1000);
if (!quick) {
runCompareTest(DIJKSTRA_BI, graphHopper, withTurnCosts, uTurnCosts, seed, compIterations, errorThreshold, results);
runCompareTest(ASTAR_BI, graphHopper, withTurnCosts, uTurnCosts, seed, compIterations, errorThreshold, results);
}
if (!quick) {
runPerformanceTest(DIJKSTRA_BI, graphHopper, withTurnCosts, seed, perfIterations, results);
}
runPerformanceTest(ASTAR_BI, graphHopper, withTurnCosts, seed, perfIterations, results);
if (!quick && landmarks > 0) {
runPerformanceTest("lm", graphHopper, withTurnCosts, seed, perfIterations, results);
}
graphHopper.close();
Map<String, Object> resultMap = results.toMap();
TreeSet<String> sortedKeys = new TreeSet<>(resultMap.keySet());
for (String key : sortedKeys) {
LOGGER.info(key + "=" + resultMap.get(key));
}
if (statsFile != null) {
File f = new File(statsFile);
boolean writeHeader = !f.exists();
try (OutputStream os = new FileOutputStream(f, true);
Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8)) {
if (writeHeader)
writer.write(getHeader(sortedKeys));
writer.write(getStatLine(sortedKeys, resultMap));
} catch (IOException e) {
LOGGER.error("Could not write summary to file '{}'", statsFile, e);
}
}
// output to be used by external caller
StringBuilder sb = new StringBuilder();
for (String key : sortedKeys) {
sb.append(key).append(":").append(resultMap.get(key)).append(";");
}
sb.deleteCharAt(sb.lastIndexOf(";"));
System.out.println(sb);
LOGGER.info("Total time: {}s", fmt((nanoTime() - start) * 1.e-9));
} | 3.68 |
hadoop_FileIoProvider_exists | /**
* Check for file existence using {@link File#exists()}.
*
* @param volume target volume. null if unavailable.
* @param f file object.
* @return true if the file exists.
*/
public boolean exists(@Nullable FsVolumeSpi volume, File f) {
final long begin = profilingEventHook.beforeMetadataOp(volume, EXISTS);
try {
faultInjectorEventHook.beforeMetadataOp(volume, EXISTS);
boolean exists = f.exists();
profilingEventHook.afterMetadataOp(volume, EXISTS, begin);
return exists;
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
framework_FieldBinder_getUnboundFields | /**
* Returns a collection of field names that are not bound.
*
* @return a collection of fields assignable to Component that are not bound
*/
public Collection<String> getUnboundFields() throws FieldBindingException {
List<String> unboundFields = new ArrayList<>();
for (Field f : fieldMap.values()) {
try {
Object value = getFieldValue(bindTarget, f);
if (value == null) {
unboundFields.add(f.getName());
}
} catch (IllegalArgumentException | IllegalAccessException e) {
throw new FieldBindingException("Could not get field value", e);
}
}
if (!unboundFields.isEmpty()) {
getLogger().severe(
"Found unbound fields in component root :" + unboundFields);
}
return unboundFields;
} | 3.68 |
hadoop_SchedulerHealth_getResourcesReleased | /**
* Get the resources released in the last scheduler run.
*
* @return resources released
*/
public Resource getResourcesReleased() {
return getResourceDetails(Operation.RELEASE);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_totalSize | /** Returns total size of this map, including logically removed state. */
int totalSize() {
return totalSize;
} | 3.68 |
flink_JobResultDeserializer_assertNotEndOfInput | /** Asserts that the provided JsonToken is not null, i.e., not at the end of the input. */
private static void assertNotEndOfInput(
final JsonParser p, @Nullable final JsonToken jsonToken) {
checkState(jsonToken != null, "Unexpected end of input at %s", p.getCurrentLocation());
} | 3.68 |
hbase_RegionGroupingProvider_getStrategy | /**
 * Instantiates a strategy from a config property. Requires conf to have already been set (as well
 * as anything the provider might need to read).
*/
RegionGroupingStrategy getStrategy(final Configuration conf, final String key,
final String defaultValue) throws IOException {
Class<? extends RegionGroupingStrategy> clazz;
try {
clazz = Strategies.valueOf(conf.get(key, defaultValue)).clazz;
} catch (IllegalArgumentException exception) {
// Fall back to them specifying a class name
// Note that the passed default class shouldn't actually be used, since the above only fails
// when there is a config value present.
clazz = conf.getClass(key, IdentityGroupingStrategy.class, RegionGroupingStrategy.class);
}
LOG.info("Instantiating RegionGroupingStrategy of type " + clazz);
try {
final RegionGroupingStrategy result = clazz.getDeclaredConstructor().newInstance();
result.init(conf, providerId);
return result;
} catch (Exception e) {
LOG.error(
"couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY);
LOG.debug("Exception details for failure to load region grouping strategy.", e);
throw new IOException("couldn't set up region grouping strategy", e);
}
} | 3.68 |
hbase_StoreUtils_getSplitPoint | /**
* Gets the mid point of the largest file passed in as split point.
*/
static Optional<byte[]> getSplitPoint(Collection<HStoreFile> storefiles,
CellComparator comparator) throws IOException {
Optional<HStoreFile> largestFile = StoreUtils.getLargestFile(storefiles);
return largestFile.isPresent()
? StoreUtils.getFileSplitPoint(largestFile.get(), comparator)
: Optional.empty();
} | 3.68 |
framework_VaadinPortlet_getCurrent | /**
* Gets the currently used Vaadin portlet. The current portlet is
* automatically defined when processing requests related to the service
* (see {@link ThreadLocal}) and in {@link VaadinSession#access(Runnable)}
 * and {@link UI#access(Runnable)}. In other cases (e.g. from background
 * threads), the current service is not automatically defined.
*
* The current portlet is derived from the current service using
* {@link VaadinService#getCurrent()}
*
* @return the current vaadin portlet instance if available, otherwise
* <code>null</code>
*
* @since 7.0
*/
public static VaadinPortlet getCurrent() {
VaadinService vaadinService = CurrentInstance.get(VaadinService.class);
if (vaadinService instanceof VaadinPortletService) {
VaadinPortletService vps = (VaadinPortletService) vaadinService;
return vps.getPortlet();
} else {
return null;
}
} | 3.68 |
flink_QuickSort_fix | /**
* Fix the records into sorted order, swapping when the first record is greater than the second
* record.
*
* @param s paged sortable
* @param pN page number of first record
* @param pO page offset of first record
* @param rN page number of second record
* @param rO page offset of second record
*/
private static void fix(IndexedSortable s, int pN, int pO, int rN, int rO) {
if (s.compare(pN, pO, rN, rO) > 0) {
s.swap(pN, pO, rN, rO);
}
} | 3.68 |
framework_BasicEventProvider_containsEvent | /**
 * Does this event provider contain this event?
*
* @param event
* The event to check for
* @return If this provider has the event then true is returned, else false
*/
public boolean containsEvent(BasicEvent event) {
return eventList.contains(event);
} | 3.68 |
hbase_MemStoreFlusher_getRequeueCount | /**
 * @return Count of times {@link #requeue(long)} was called; i.e. this is the number of times we've
* been requeued.
*/
public int getRequeueCount() {
return this.requeueCount;
} | 3.68 |
framework_AbstractComponent_getIcon | /*
* Gets the component's icon resource. Don't add a JavaDoc comment here, we
* use the default documentation from implemented interface.
*/
@Override
public Resource getIcon() {
return getResource(ComponentConstants.ICON_RESOURCE);
} | 3.68 |
flink_SourceReader_handleSourceEvents | /**
* Handle a custom source event sent by the {@link SplitEnumerator}. This method is called when
* the enumerator sends an event via {@link SplitEnumeratorContext#sendEventToSourceReader(int,
* SourceEvent)}.
*
* <p>This method has a default implementation that does nothing, because most sources do not
* require any custom events.
*
* @param sourceEvent the event sent by the {@link SplitEnumerator}.
*/
default void handleSourceEvents(SourceEvent sourceEvent) {} | 3.68 |
querydsl_BeanMap_valueIterator | /**
* Convenience method for getting an iterator over the values.
*
* @return an iterator over the values
*/
public Iterator<Object> valueIterator() {
final Iterator<String> iter = keyIterator();
return new Iterator<Object>() {
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public Object next() {
Object key = iter.next();
return get(key);
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove() not supported for BeanMap");
}
};
} | 3.68 |
hadoop_RMContainerTokenSecretManager_activateNextMasterKey | /**
* Activate the new master-key
*/
@Private
public void activateNextMasterKey() {
super.writeLock.lock();
try {
LOG.info("Activating next master key with id: "
+ this.nextMasterKey.getMasterKey().getKeyId());
this.currentMasterKey = this.nextMasterKey;
this.nextMasterKey = null;
} finally {
super.writeLock.unlock();
}
} | 3.68 |
hudi_HoodiePartitionMetadata_readFromFS | /**
* Read out the metadata for this partition.
*/
public void readFromFS() throws IOException {
// first try reading the text format (legacy, currently widespread)
boolean readFile = readTextFormatMetaFile();
if (!readFile) {
// now try reading the base file formats.
readFile = readBaseFormatMetaFile();
}
// throw exception.
if (!readFile) {
throw new HoodieException("Unable to read any partition meta file to locate the table timeline.");
}
} | 3.68 |
hbase_Scan_isReversed | /**
* Get whether this scan is a reversed one.
* @return true if backward scan, false if forward(default) scan
*/
public boolean isReversed() {
return reversed;
} | 3.68 |
hbase_KeyValue_getSequenceId | /**
* used to achieve atomic operations in the memstore.
*/
@Override
public long getSequenceId() {
return seqId;
} | 3.68 |
hadoop_SecureableZone_removeRecord | /**
* Removes a record from the Zone.
* @param r The record to be removed
* @see Record
*/
@Override public void removeRecord(Record r) {
if (records == null) {
records = new ArrayList<Record>();
}
super.removeRecord(r);
records.remove(r);
} | 3.68 |
pulsar_PulsarAdminImpl_getServiceUrl | /**
* @return the service HTTP URL that is being used
*/
public String getServiceUrl() {
return serviceUrl;
} | 3.68 |
flink_SqlReplaceTableAs_getFullConstraints | /** Returns the column constraints plus the table constraints. */
public List<SqlTableConstraint> getFullConstraints() {
return SqlConstraintValidator.getFullConstraints(tableConstraints, columnList);
} | 3.68 |
hmily_ZookeeperRepository_hasNext | /**
* Has next boolean.
*
* @return the boolean
*/
public boolean hasNext() {
return index < nodes.length;
} | 3.68 |
flink_StringUtils_concatenateWithAnd | /**
* If both string arguments are non-null, this method concatenates them with ' and '. If only
* one of the arguments is non-null, this method returns the non-null argument. If both
* arguments are null, this method returns null.
*
* @param s1 The first string argument
* @param s2 The second string argument
* @return The concatenated string, or non-null argument, or null
*/
@Nullable
public static String concatenateWithAnd(@Nullable String s1, @Nullable String s2) {
if (s1 != null) {
return s2 == null ? s1 : s1 + " and " + s2;
} else {
return s2;
}
} | 3.68 |
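A quick usage sketch of the four null-combinations, assuming the class above is Flink's org.apache.flink.util.StringUtils:

```java
import org.apache.flink.util.StringUtils; // assumed package for the snippet above

class ConcatenateWithAndExample {
    public static void main(String[] args) {
        System.out.println(StringUtils.concatenateWithAnd("a", "b"));   // "a and b"
        System.out.println(StringUtils.concatenateWithAnd("a", null));  // "a"
        System.out.println(StringUtils.concatenateWithAnd(null, "b"));  // "b"
        System.out.println(StringUtils.concatenateWithAnd(null, null)); // null
    }
}
```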
hbase_TransitRegionStateProcedure_serverCrashed | // Should be called with RegionStateNode locked
public void serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode,
ServerName serverName, boolean forceNewPlan) throws IOException {
this.forceNewPlan = forceNewPlan;
if (remoteProc != null) {
// this means we are waiting for the sub procedure, so wake it up
remoteProc.serverCrashed(env, regionNode, serverName);
} else {
// we are in RUNNING state, just update the region state, and we will process it later.
env.getAssignmentManager().regionClosedAbnormally(regionNode);
}
} | 3.68 |
flink_StateTtlConfig_setTtlTimeCharacteristic | /**
* Sets the time characteristic.
*
* @param ttlTimeCharacteristic The time characteristic configures time scale to use for
* ttl.
*/
@Nonnull
public Builder setTtlTimeCharacteristic(
@Nonnull TtlTimeCharacteristic ttlTimeCharacteristic) {
this.ttlTimeCharacteristic = ttlTimeCharacteristic;
return this;
} | 3.68 |
framework_TreeGridConnector_isCollapseAllowed | /**
* Checks if the item can be collapsed.
*
* @param row
* the item row
* @return {@code true} if the item is allowed to be collapsed,
* {@code false} otherwise.
*/
public static boolean isCollapseAllowed(JsonObject row) {
return row.getBoolean(
HierarchicalDataCommunicatorConstants.ROW_COLLAPSE_ALLOWED);
} | 3.68 |
hbase_StateMachineProcedure_getCurrentStateId | /**
* This method is used from test code as it cannot be assumed that state transition will happen
 * sequentially. Some procedures may skip steps/states, some may add intermediate steps in the
* future.
*/
public int getCurrentStateId() {
return getStateId(getCurrentState());
} | 3.68 |
hbase_Mutation_has | /**
* Private method to determine if this object's familyMap contains the given value assigned to the
* given family, qualifier and timestamp, respecting the 2 boolean arguments.
*/
protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS,
boolean ignoreValue) {
List<Cell> list = getCellList(family);
if (list.isEmpty()) {
return false;
}
// Boolean analysis of ignoreTS/ignoreValue.
// T T => 2
// T F => 3 (first is always true)
// F T => 2
// F F => 1
if (!ignoreTS && !ignoreValue) {
for (Cell cell : list) {
if (
CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)
&& CellUtil.matchingValue(cell, value) && cell.getTimestamp() == ts
) {
return true;
}
}
} else if (ignoreValue && !ignoreTS) {
for (Cell cell : list) {
if (
CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)
&& cell.getTimestamp() == ts
) {
return true;
}
}
} else if (!ignoreValue && ignoreTS) {
for (Cell cell : list) {
if (
CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)
&& CellUtil.matchingValue(cell, value)
) {
return true;
}
}
} else {
for (Cell cell : list) {
if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) {
return true;
}
}
}
return false;
} | 3.68 |
morf_ParallelQueryHint_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return degreeOfParallelism == null ? getClass().getSimpleName() : format("ParallelQueryHint [degreeOfParallelism=%s]", degreeOfParallelism.toString());
} | 3.68 |
flink_CommittableCollector_getCheckpointCommittablesUpTo | /**
* Returns all {@link CheckpointCommittableManager} until the requested checkpoint id.
*
* @param checkpointId counter
* @return collection of {@link CheckpointCommittableManager}
*/
public Collection<? extends CheckpointCommittableManager<CommT>> getCheckpointCommittablesUpTo(
long checkpointId) {
// clean up fully committed previous checkpoints
// this wouldn't work with concurrent unaligned checkpoints
Collection<CheckpointCommittableManagerImpl<CommT>> checkpoints =
checkpointCommittables.headMap(checkpointId, true).values();
checkpoints.removeIf(CheckpointCommittableManagerImpl::isFinished);
return checkpoints;
} | 3.68 |
framework_StaticSection_getComponent | /**
* Returns the component displayed in this cell.
*
* @return the component
*/
public Component getComponent() {
if (cellState.type != GridStaticCellType.WIDGET) {
throw new IllegalStateException(
"Cannot fetch Component from a cell with type "
+ cellState.type);
}
return (Component) cellState.connector;
} | 3.68 |
hadoop_JsonSerialization_toJson | /**
* Convert an instance to a JSON string.
* @param instance instance to convert
* @return a JSON string description
* @throws JsonProcessingException Json generation problems
*/
public synchronized String toJson(T instance) throws JsonProcessingException {
return mapper.writeValueAsString(instance);
} | 3.68 |
morf_SqlDialect_getSqlForMin | /**
* Converts the min function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForMin(Function function) {
return "MIN(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
hudi_ClusteringCommitSink_validateWriteResult | /**
* Validate actions taken by clustering. In the first implementation, we validate at least one new file is written.
* But we can extend this to add more validation. E.g. number of records read = number of records written etc.
* We can also make these validations in BaseCommitActionExecutor to reuse pre-commit hooks for multiple actions.
*/
private static void validateWriteResult(HoodieClusteringPlan clusteringPlan, String instantTime, HoodieWriteMetadata<List<WriteStatus>> writeMetadata) {
if (writeMetadata.getWriteStatuses().isEmpty()) {
throw new HoodieClusteringException("Clustering plan produced 0 WriteStatus for " + instantTime
+ " #groups: " + clusteringPlan.getInputGroups().size() + " expected at least "
+ clusteringPlan.getInputGroups().stream().mapToInt(HoodieClusteringGroup::getNumOutputFileGroups).sum()
+ " write statuses");
}
} | 3.68 |
flink_JavaFieldPredicates_annotatedWith | /**
* Match the single Annotation of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if the tested {@link JavaField} is
* annotated with the annotation identified by the fully qualified name {@code
* fqAnnotationTypeName}.
*/
public static DescribedPredicate<JavaField> annotatedWith(String fqAnnotationTypeName) {
String className = getClassSimpleNameFromFqName(fqAnnotationTypeName);
return matchAnnotationType(
className,
annotation -> annotation.getRawType().getName().equals(fqAnnotationTypeName));
} | 3.68 |
framework_VCheckBoxGroup_buildOptions | /*
* Build all the options
*/
public void buildOptions(List<JsonObject> items) {
Roles.getGroupRole().set(getElement());
int i = 0;
int widgetsToRemove = getWidget().getWidgetCount() - items.size();
if (widgetsToRemove < 0) {
widgetsToRemove = 0;
}
List<Widget> remove = new ArrayList<>(widgetsToRemove);
for (Widget widget : getWidget()) {
if (i < items.size()) {
updateItem((VCheckBox) widget, items.get(i), false);
i++;
} else {
remove.add(widget);
}
}
remove.stream().forEach(this::remove);
while (i < items.size()) {
updateItem(new VCheckBox(), items.get(i), true);
i++;
}
} | 3.68 |
hibernate-validator_ExecutableMetaData_findParameterMetaData | /**
* Finds the one executable from the underlying hierarchy with parameter
* constraints. If no executable in the hierarchy is parameter constrained,
* the parameter meta data from this builder's base executable is returned.
*
* @return The parameter meta data for this builder's executable.
*/
private List<ParameterMetaData> findParameterMetaData() {
List<ParameterMetaData.Builder> parameterBuilders = null;
for ( ConstrainedExecutable oneExecutable : constrainedExecutables ) {
if ( parameterBuilders == null ) {
parameterBuilders = newArrayList();
for ( ConstrainedParameter oneParameter : oneExecutable.getAllParameterMetaData() ) {
parameterBuilders.add(
new ParameterMetaData.Builder(
callable.getDeclaringClass(),
oneParameter,
constraintCreationContext,
parameterNameProvider
)
);
}
}
else {
int i = 0;
for ( ConstrainedParameter oneParameter : oneExecutable.getAllParameterMetaData() ) {
parameterBuilders.get( i ).add( oneParameter );
i++;
}
}
}
List<ParameterMetaData> parameterMetaDatas = newArrayList();
for ( ParameterMetaData.Builder oneBuilder : parameterBuilders ) {
parameterMetaDatas.add( oneBuilder.build() );
}
return parameterMetaDatas;
} | 3.68 |
hadoop_PipesPartitioner_getPartition | /**
* If a partition result was set manually, return it. Otherwise, we call
* the Java partitioner.
* @param key the key to partition
* @param value the value to partition
* @param numPartitions the number of reduces
*/
public int getPartition(K key, V value,
int numPartitions) {
Integer result = CACHE.get();
if (result == null) {
return part.getPartition(key, value, numPartitions);
} else {
return result;
}
} | 3.68 |
hadoop_StagingCommitter_cleanup | /**
* Staging committer cleanup includes calling wrapped committer's
* cleanup method, and removing staging uploads path and all
* destination paths in the final filesystem.
* @param commitContext commit context
* @param suppressExceptions should exceptions be suppressed?
* @throws IOException IO failures if exceptions are not suppressed.
*/
@Override
@SuppressWarnings("deprecation")
protected void cleanup(CommitContext commitContext,
boolean suppressExceptions)
throws IOException {
maybeIgnore(suppressExceptions, "Cleanup wrapped committer",
() -> wrappedCommitter.cleanupJob(
commitContext.getJobContext()));
maybeIgnore(suppressExceptions, "Delete staging uploads path",
() -> deleteStagingUploadsParentDirectory(
commitContext.getJobContext()));
maybeIgnore(suppressExceptions, "Delete destination paths",
() -> deleteDestinationPaths(
commitContext.getJobContext()));
super.cleanup(commitContext, suppressExceptions);
} | 3.68 |
hadoop_ServletUtil_parseLongParam | /**
* parseLongParam.
*
* @param request request.
* @param param param.
* @return a long value as passed in the given parameter, throwing
* an exception if it is not present or if it is not a valid number.
* @throws IOException raised on errors performing I/O.
*/
public static long parseLongParam(ServletRequest request, String param)
throws IOException {
String paramStr = request.getParameter(param);
if (paramStr == null) {
throw new IOException("Invalid request has no " + param + " parameter");
}
return Long.parseLong(paramStr);
} | 3.68 |
pulsar_StreamingDataBlockHeaderImpl_toStream | /**
* Get the content of the data block header as InputStream.
* Read out in format:
 * [ magic_word -- int ][ header_len -- long ][ block_len -- long ][ first_entry_id -- long ][ ledger_id -- long ][ padding zeros ]
*/
@Override
public InputStream toStream() {
ByteBuf out = PulsarByteBufAllocator.DEFAULT.buffer(HEADER_MAX_SIZE, HEADER_MAX_SIZE);
out.writeInt(MAGIC_WORD)
.writeLong(headerLength)
.writeLong(blockLength)
.writeLong(firstEntryId)
.writeLong(ledgerId)
.writeBytes(PADDING);
// true means the input stream will release the ByteBuf on close
return new ByteBufInputStream(out, true);
} | 3.68 |
hudi_OptionsResolver_sortClusteringEnabled | /**
* Returns whether the clustering sort is enabled.
*/
public static boolean sortClusteringEnabled(Configuration conf) {
return !StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.CLUSTERING_SORT_COLUMNS));
} | 3.68 |
hadoop_StepType_getDescription | /**
* Returns step type description.
*
* @return String step type description
*/
public String getDescription() {
return description;
} | 3.68 |
rocketmq-connect_AbstractConfigManagementService_putTaskConfigs | /**
* remove and add
*
* @param connectorName
* @param configs
*/
protected void putTaskConfigs(String connectorName, List<ConnectKeyValue> configs) {
List<ConnectKeyValue> exist = taskKeyValueStore.get(connectorName);
if (null != exist && exist.size() > 0) {
taskKeyValueStore.remove(connectorName);
}
taskKeyValueStore.put(connectorName, configs);
} | 3.68 |
hbase_ByteBufferArray_asSubByteBuffers | /**
* Creates a sub-array from a given array of ByteBuffers from the given offset to the length
* specified. For eg, if there are 4 buffers forming an array each with length 10 and if we call
 * asSubByteBuffers(5, 10) then we will create a sub-array consisting of two BBs, where the first
 * one will be a BB from 'position' 5 to a 'length' 5 and the 2nd BB will be from 'position' 0 to
* 'length' 5.
* @param offset the position in the whole array which is composited by multiple byte buffers.
* @param len the length of bytes
* @return the underlying ByteBuffers, each ByteBuffer is a slice from the backend and will have a
* zero position.
*/
public ByteBuffer[] asSubByteBuffers(long offset, final int len) {
BufferIterator it = new BufferIterator(offset, len);
ByteBuffer[] mbb = new ByteBuffer[it.getBufferCount()];
for (int i = 0; i < mbb.length; i++) {
assert it.hasNext();
mbb[i] = it.next();
}
assert it.getSum() == len;
return mbb;
} | 3.68 |
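The BufferIterator used above is not part of this snippet; the javadoc's worked example boils down to mapping a global offset onto (buffer index, in-buffer offset) pairs. The standalone sketch below reproduces that arithmetic under the simplifying assumption that all backing buffers have the same size; it is illustrative only, not HBase's implementation.

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

/** Slices a logical (offset, len) range out of equally sized backing buffers; a simplified sketch. */
class EqualSizedBufferSlicer {
    static ByteBuffer[] asSubByteBuffers(ByteBuffer[] buffers, int bufferSize, long offset, int len) {
        List<ByteBuffer> slices = new ArrayList<>();
        long remaining = len;
        long pos = offset;
        while (remaining > 0) {
            int bufferIndex = (int) (pos / bufferSize);   // which backing buffer
            int bufferOffset = (int) (pos % bufferSize);  // position inside that buffer
            int sliceLen = (int) Math.min(remaining, bufferSize - bufferOffset);
            ByteBuffer view = buffers[bufferIndex].duplicate();
            view.position(bufferOffset).limit(bufferOffset + sliceLen);
            slices.add(view.slice()); // zero-positioned view, as in the javadoc above
            pos += sliceLen;
            remaining -= sliceLen;
        }
        return slices.toArray(new ByteBuffer[0]);
    }
}
```

For the javadoc's example (4 buffers of size 10, offset 5, length 10), this yields one slice covering positions 5–9 of the first buffer and one covering positions 0–4 of the second.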
AreaShop_TeleportFeature_setTeleport | /**
* Set the teleport location of this region.
* @param location The location to set as teleport location
*/
public void setTeleport(Location location) {
if(location == null) {
getRegion().setSetting("general.teleportLocation", null);
} else {
getRegion().setSetting("general.teleportLocation", Utils.locationToConfig(location, true));
}
} | 3.68 |
hadoop_NamedCommitterFactory_loadCommitterClass | /**
* Load the class named in {@link #NAMED_COMMITTER_CLASS}.
* @param context job or task context
* @return the committer class
* @throws IOException if no committer was defined.
*/
private Class<? extends PathOutputCommitter> loadCommitterClass(
JobContext context) throws IOException {
Preconditions.checkNotNull(context, "null context");
Configuration conf = context.getConfiguration();
String value = conf.get(NAMED_COMMITTER_CLASS, "");
if (value.isEmpty()) {
throw new IOException("No committer defined in " + NAMED_COMMITTER_CLASS);
}
return conf.getClass(NAMED_COMMITTER_CLASS,
FileOutputCommitter.class, PathOutputCommitter.class);
} | 3.68 |
streampipes_TextDocumentStatistics_getNumWords | /**
* Returns the overall number of words in all blocks.
*
* @return Sum
*/
public int getNumWords() {
return numWords;
} | 3.68 |
flink_PekkoInvocationHandler_invokeRpc | /**
* Invokes a RPC method by sending the RPC invocation details to the rpc endpoint.
*
* @param method to call
* @param args of the method call
* @return result of the RPC; the result future is completed with a {@link TimeoutException} if
* the requests times out; if the recipient is not reachable, then the result future is
* completed with a {@link RecipientUnreachableException}.
* @throws Exception if the RPC invocation fails
*/
private Object invokeRpc(Method method, Object[] args) throws Exception {
String methodName = method.getName();
Class<?>[] parameterTypes = method.getParameterTypes();
final boolean isLocalRpcInvocation = method.getAnnotation(Local.class) != null;
Annotation[][] parameterAnnotations = method.getParameterAnnotations();
Duration futureTimeout =
RpcGatewayUtils.extractRpcTimeout(parameterAnnotations, args, timeout);
final RpcInvocation rpcInvocation =
createRpcInvocationMessage(
method.getDeclaringClass().getSimpleName(),
methodName,
isLocalRpcInvocation,
parameterTypes,
args);
Class<?> returnType = method.getReturnType();
final Object result;
if (Objects.equals(returnType, Void.TYPE)) {
tell(rpcInvocation);
result = null;
} else {
// Capture the call stack. It is significantly faster to do that via an exception than
// via Thread.getStackTrace(), because exceptions lazily initialize the stack trace,
// initially only
// capture a lightweight native pointer, and convert that into the stack trace lazily
// when needed.
final Throwable callStackCapture = captureAskCallStack ? new Throwable() : null;
// execute an asynchronous call
final CompletableFuture<?> resultFuture =
ask(rpcInvocation, futureTimeout)
.thenApply(
resultValue ->
deserializeValueIfNeeded(
resultValue, method, flinkClassLoader));
final CompletableFuture<Object> completableFuture = new CompletableFuture<>();
resultFuture.whenComplete(
(resultValue, failure) -> {
if (failure != null) {
completableFuture.completeExceptionally(
resolveTimeoutException(
ExceptionUtils.stripCompletionException(failure),
callStackCapture,
address,
rpcInvocation));
} else {
completableFuture.complete(resultValue);
}
});
if (Objects.equals(returnType, CompletableFuture.class)) {
result = completableFuture;
} else {
try {
result = completableFuture.get(futureTimeout.toMillis(), TimeUnit.MILLISECONDS);
} catch (ExecutionException ee) {
throw new RpcException(
"Failure while obtaining synchronous RPC result.",
ExceptionUtils.stripExecutionException(ee));
}
}
}
return result;
} | 3.68 |
hbase_RowResource_updateBinary | // This currently supports only update of one row at a time.
Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) {
servlet.getMetrics().incrementRequests(1);
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
.entity("Forbidden" + CRLF).build();
}
Table table = null;
try {
byte[] row = rowspec.getRow();
byte[][] columns = rowspec.getColumns();
byte[] column = null;
if (columns != null) {
column = columns[0];
}
long timestamp = HConstants.LATEST_TIMESTAMP;
List<String> vals = headers.getRequestHeader("X-Row");
if (vals != null && !vals.isEmpty()) {
row = Bytes.toBytes(vals.get(0));
}
vals = headers.getRequestHeader("X-Column");
if (vals != null && !vals.isEmpty()) {
column = Bytes.toBytes(vals.get(0));
}
vals = headers.getRequestHeader("X-Timestamp");
if (vals != null && !vals.isEmpty()) {
timestamp = Long.parseLong(vals.get(0));
}
if (column == null) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column found to be null." + CRLF).build();
}
Put put = new Put(row);
byte parts[][] = CellUtil.parseColumn(column);
if (parts.length != 2) {
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request" + CRLF).build();
}
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put)
.setValue(message).build());
table = servlet.getTable(tableResource.getName());
table.put(put);
if (LOG.isTraceEnabled()) {
LOG.trace("PUT " + put.toString());
}
servlet.getMetrics().incrementSucessfulPutRequests(1);
return Response.ok().build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedPutRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table", ioe);
}
}
}
} | 3.68 |
graphhopper_VectorTile_setExtent | /**
* <pre>
* Although this is an "optional" field it is required by the specification.
* See https://github.com/mapbox/vector-tile-spec/issues/47
* </pre>
*
* <code>optional uint32 extent = 5 [default = 4096];</code>
*/
public Builder setExtent(int value) {
bitField0_ |= 0x00000020;
extent_ = value;
onChanged();
return this;
} | 3.68 |
hadoop_Chain_setReducer | /**
* Sets the Reducer class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Reducer.
*
* @param job
* the chain job.
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer input key class.
* @param inputValueClass
* reducer input value class.
* @param outputKeyClass
* reducer output key class.
* @param outputValueClass
* reducer output value class.
* @param reducerConf
* a configuration for the Reducer class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
@SuppressWarnings("unchecked")
protected static void setReducer(Job job, Class<? extends Reducer> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration reducerConf) {
String prefix = getPrefix(false);
Configuration jobConf = job.getConfiguration();
checkReducerAlreadySet(false, jobConf, prefix, false);
jobConf.setClass(prefix + CHAIN_REDUCER_CLASS, klass, Reducer.class);
setReducerConf(jobConf, inputKeyClass, inputValueClass, outputKeyClass,
outputValueClass, reducerConf, prefix);
} | 3.68 |
flink_AvroWriters_forGenericRecord | /**
* Creates an {@link AvroWriterFactory} that accepts and writes Avro generic types. The Avro
* writers will use the given schema to build and write the records.
*
* @param schema The schema of the generic type.
*/
public static AvroWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
String schemaString = schema.toString();
// Must override the lambda representation because of a bug in shading lambda
// serialization, see similar issue FLINK-28043 for more details.
AvroBuilder<GenericRecord> builder =
new AvroBuilder<GenericRecord>() {
@Override
public DataFileWriter<GenericRecord> createWriter(OutputStream outputStream)
throws IOException {
return createAvroDataFileWriter(
schemaString,
new Function<Schema, DatumWriter<GenericRecord>>() {
@Override
public DatumWriter<GenericRecord> apply(Schema schema) {
return new GenericDatumWriter<>(schema);
}
},
outputStream);
}
};
return new AvroWriterFactory<>(builder);
} | 3.68 |
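A common way to consume the returned factory is as the bulk format of Flink's FileSink. The sketch below assumes the flink-avro and file-sink connector dependencies are on the classpath; the output path and wiring are placeholders.

```java
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.avro.AvroWriters;
import org.apache.flink.streaming.api.datastream.DataStream;

class AvroFileSinkExample {
    static FileSink<GenericRecord> buildSink(Schema schema) {
        // Bulk-encoded sink writing one Avro container file per part file.
        return FileSink
                .forBulkFormat(new Path("/tmp/avro-out"), AvroWriters.forGenericRecord(schema))
                .build();
    }

    static void attach(DataStream<GenericRecord> records, Schema schema) {
        records.sinkTo(buildSink(schema)); // assumed wiring; checkpointing must be enabled for FileSink
    }
}
```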
flink_Catalog_bulkGetPartitionColumnStatistics | /**
* Get a list of column statistics for given partitions.
*
* @param tablePath path of the table
* @param partitionSpecs partition specs of partitions that will be used to filter out all other
* unrelated statistics, i.e. the statistics fetch will be limited within the given
* partitions
* @return list of column statistics for given partitions
* @throws PartitionNotExistException if one partition does not exist
* @throws CatalogException in case of any runtime exception
*/
default List<CatalogColumnStatistics> bulkGetPartitionColumnStatistics(
ObjectPath tablePath, List<CatalogPartitionSpec> partitionSpecs)
throws PartitionNotExistException, CatalogException {
checkNotNull(partitionSpecs, "partitionSpecs cannot be null");
List<CatalogColumnStatistics> result = new ArrayList<>(partitionSpecs.size());
for (CatalogPartitionSpec partitionSpec : partitionSpecs) {
result.add(getPartitionColumnStatistics(tablePath, partitionSpec));
}
return result;
} | 3.68 |
framework_QuerySortOrder_asc | /**
* Creates a new query sort builder with given sorting using ascending sort
* direction.
*
* @param by
* the string to sort by
*
* @return the query sort builder
*/
public static QuerySortOrderBuilder asc(String by) {
return new QuerySortOrderBuilder().thenAsc(by);
} | 3.68 |
framework_TabSheet_addTab | /**
* Adds a new tab into TabSheet. Component caption and icon are copied to
* the tab metadata at creation time.
*
* If the tab sheet already contains the component, its tab is returned.
*
* @param component
* the component to be added onto tab - should not be null.
* @param position
* The position where the tab should be added
* @return the created {@link Tab}
*/
public Tab addTab(Component component, int position) {
Tab result = tabs.get(component);
if (result == null) {
result = addTab(component, component.getCaption(),
component.getIcon(), position);
}
return result;
}
/**
* Moves all components from another container to this container. The
* components are removed from the other container.
*
* If the source container is a {@link TabSheet} | 3.68 |
morf_DataValueLookup_getObject | /**
* Gets the value as either a long, integer, boolean, date, local date, big decimal,
* byte array or string according to the type definition when called.
*
* <p>Just dispatches to the corresponding typed method (e.g. {@link #getBoolean(String)}).
*
* <p>Most useful when interacting with {@link ResultSet}. In order to facilitate this
* use, dates are always returned as a {@link java.sql.Date} rather than a {@link org.joda.time.LocalDate}</p>
*
* @param column The column.
* @return The value.
*/
public default Object getObject(Column column) {
switch (column.getType()) {
case BIG_INTEGER:
return getLong(column.getName());
case BOOLEAN:
return getBoolean(column.getName());
case INTEGER:
return getInteger(column.getName());
case DATE:
return getDate(column.getName());
case DECIMAL:
BigDecimal result = getBigDecimal(column.getName());
try {
return result == null ? null : result.setScale(column.getScale());
} catch (ArithmeticException e) {
throw new IllegalStateException(String.format(
"Value of decimal column [%s] has a value of [%s] which must be rounded to fit into (%d,%d). " +
"To read it with this precision, ensure it is written with rounding pre-applied.",
column.getName(),
result.toPlainString(),
column.getWidth(),
column.getScale()
), e);
}
case BLOB:
return getByteArray(column.getName());
case CLOB:
case STRING:
return getString(column.getName());
default:
throw new UnsupportedOperationException("Column [" + column.getName() + "] type [" + column.getType() + "] not known");
}
} | 3.68 |
flink_AbstractParameterTool_getBoolean | /**
 * Returns the Boolean value for the given key. If the key does not exist it will return the
* default value given. The method returns whether the string of the value is "true" ignoring
* cases.
*/
public boolean getBoolean(String key, boolean defaultValue) {
addToDefaults(key, Boolean.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Boolean.valueOf(value);
}
} | 3.68 |
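A typical call site, assuming ParameterTool.fromArgs as the concrete entry point for command-line arguments:

```java
import org.apache.flink.api.java.utils.ParameterTool;

class ParameterToolBooleanExample {
    public static void main(String[] args) {
        // e.g. args = {"--verbose", "true"}
        ParameterTool params = ParameterTool.fromArgs(args);
        boolean verbose = params.getBoolean("verbose", false); // falls back to false if absent
        System.out.println("verbose=" + verbose);
    }
}
```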
framework_VRichTextToolbar_updateStatus | /**
* Updates the status of all the stateful buttons.
*/
@SuppressWarnings("deprecation")
private void updateStatus() {
if (basic != null) {
bold.setDown(basic.isBold());
italic.setDown(basic.isItalic());
underline.setDown(basic.isUnderlined());
subscript.setDown(basic.isSubscript());
superscript.setDown(basic.isSuperscript());
}
if (extended != null) {
strikethrough.setDown(extended.isStrikethrough());
}
} | 3.68 |
hadoop_CRC64_init | /*
* Initialize a table constructed from POLY (0x9a6c9329ac4bc9b5L).
* */
private void init() {
value = -1;
for (int n = 0; n < TABLE_LENGTH; ++n) {
long crc = n;
for (int i = 0; i < 8; ++i) {
if ((crc & 1) == 1) {
crc = (crc >>> 1) ^ POLY;
} else {
crc >>>= 1;
}
}
TABLE[n] = crc;
}
} | 3.68 |
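The table initialized above is normally consumed one input byte at a time in an update step. The sketch below is a self-contained, reflected, table-driven CRC-64 using the same polynomial; it illustrates the standard pattern and is not claimed to be Hadoop's exact class.

```java
/** Self-contained reflected, table-driven CRC-64 sketch using the same polynomial as above. */
class SimpleCrc64 {
    private static final int TABLE_LENGTH = 256;
    private static final long POLY = 0x9a6c9329ac4bc9b5L;
    private static final long[] TABLE = new long[TABLE_LENGTH];

    static {
        for (int n = 0; n < TABLE_LENGTH; ++n) {
            long crc = n;
            for (int i = 0; i < 8; ++i) {
                crc = ((crc & 1) == 1) ? (crc >>> 1) ^ POLY : crc >>> 1;
            }
            TABLE[n] = crc;
        }
    }

    private long value = -1; // same initial value as init() above

    void update(byte[] input, int off, int len) {
        for (int i = off; i < off + len; i++) {
            // Fold one byte into the running value via the precomputed table.
            value = TABLE[(int) ((value ^ input[i]) & 0xff)] ^ (value >>> 8);
        }
    }

    long getValue() {
        return value ^ -1; // final XOR, mirroring the -1 initialization
    }
}
```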
flink_FutureUtils_throwIfCompletedExceptionally | /**
* Throws the causing exception if the given future is completed exceptionally, otherwise do
* nothing.
*
* @param future the future to check.
* @throws Exception when the future is completed exceptionally.
*/
public static void throwIfCompletedExceptionally(CompletableFuture<?> future) throws Exception {
if (future.isCompletedExceptionally()) {
future.get();
}
} | 3.68 |
querydsl_MathExpressions_coth | /**
* Create a {@code coth(num)} expression
*
* <p>Returns the hyperbolic cotangent of num.</p>
*
* @param num numeric expression
* @return coth(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> coth(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.COTH, num);
} | 3.68 |
pulsar_ProducerImpl_getCnxIfReady | /**
* Hook method for testing. By returning null, it's possible to prevent messages
* being delivered to the broker.
*
* @return cnx if OpSend messages should be written to open connection. Caller must
* verify that the returned cnx is not null before using reference.
*/
protected ClientCnx getCnxIfReady() {
if (getState() == State.Ready) {
return connectionHandler.cnx();
} else {
return null;
}
} | 3.68 |
rocketmq-connect_PluginUtils_isConcrete | /**
 * Verify the given class corresponds to a concrete class and not to an abstract class or interface.
*/
public static boolean isConcrete(Class<?> klass) {
int mod = klass.getModifiers();
return !Modifier.isAbstract(mod) && !Modifier.isInterface(mod);
} | 3.68 |
hbase_MetricsMaster_incrementRequests | /**
* @param inc How much to add to requests.
*/
public void incrementRequests(final long inc) {
masterSource.incRequests(inc);
} | 3.68 |
hadoop_ApplicationEntity_isApplicationEntity | /**
* Checks if the input TimelineEntity object is an ApplicationEntity.
*
* @param te TimelineEntity object.
* @return true if input is an ApplicationEntity, false otherwise
*/
public static boolean isApplicationEntity(TimelineEntity te) {
return (te == null ? false
: te.getType().equals(TimelineEntityType.YARN_APPLICATION.toString()));
} | 3.68 |
hbase_BufferedDataBlockEncoder_ensureSpace | /**
* Asserts that there is at least the given amount of unfilled space remaining in the given
* buffer.
* @param out typically, the buffer we are writing to
* @param length the required space in the buffer
 * @throws EncoderBufferTooSmallException If there are not enough bytes.
*/
protected static void ensureSpace(ByteBuffer out, int length)
throws EncoderBufferTooSmallException {
if (out.position() + length > out.limit()) {
throw new EncoderBufferTooSmallException("Buffer position=" + out.position()
+ ", buffer limit=" + out.limit() + ", length to be written=" + length);
}
} | 3.68 |
framework_ViewChangeListener_getNewView | /**
* Returns the view being activated.
*
* @return new View
*/
public View getNewView() {
return newView;
} | 3.68 |
incubator-hugegraph-toolchain_HugeGraphLoader_loadStruct | /**
* TODO: Separate classes: ReadHandler -> ParseHandler -> InsertHandler
 * Let the load task work in pipeline mode
*/
private void loadStruct(InputStruct struct, InputReader reader) {
LOG.info("Start loading '{}'", struct);
LoadMetrics metrics = this.context.summary().metrics(struct);
metrics.startInFlight();
ParseTaskBuilder taskBuilder = new ParseTaskBuilder(this.context, struct);
final int batchSize = this.context.options().batchSize;
List<Line> lines = new ArrayList<>(batchSize);
for (boolean finished = false; !finished;) {
if (this.context.stopped()) {
break;
}
try {
// Read next line from data source
if (reader.hasNext()) {
Line next = reader.next();
if (Objects.nonNull(next)) {
lines.add(next);
metrics.increaseReadSuccess();
}
} else {
finished = true;
}
} catch (ReadException e) {
metrics.increaseReadFailure();
this.handleReadFailure(struct, e);
}
// If read max allowed lines, stop loading
boolean reachedMaxReadLines = this.reachedMaxReadLines();
if (reachedMaxReadLines) {
finished = true;
}
if (lines.size() >= batchSize || finished) {
List<ParseTaskBuilder.ParseTask> tasks = taskBuilder.build(lines);
for (ParseTaskBuilder.ParseTask task : tasks) {
this.executeParseTask(struct, task.mapping(), task);
}
// Confirm offset to avoid lost records
reader.confirmOffset();
this.context.newProgress().markLoaded(struct, finished);
this.handleParseFailure();
if (reachedMaxReadLines) {
LOG.warn("Read lines exceed limit, stopped loading tasks");
this.context.stopLoading();
}
lines = new ArrayList<>(batchSize);
}
}
metrics.stopInFlight();
LOG.info("Finish loading '{}'", struct);
} | 3.68 |
pulsar_PulsarAdminImpl_close | /**
* Close the Pulsar admin client to release all the resources.
*/
@Override
public void close() {
try {
auth.close();
} catch (IOException e) {
LOG.error("Failed to close the authentication service", e);
}
client.close();
asyncHttpConnector.close();
} | 3.68 |
flink_FlinkRelMetadataQuery_getFilteredColumnInterval | /**
* Returns the {@link FlinkMetadata.ColumnInterval} of the given column under the given filter
* argument.
*
* @param rel the relational expression
* @param columnIndex the index of the given column
* @param filterArg the index of the filter argument
* @return the interval of the given column of a specified relational expression. Returns null
* if interval cannot be estimated, Returns {@link
* org.apache.flink.table.planner.plan.stats.EmptyValueInterval} if column values does not
* contains any value except for null.
*/
public ValueInterval getFilteredColumnInterval(RelNode rel, int columnIndex, int filterArg) {
for (; ; ) {
try {
return filteredColumnInterval.getFilteredColumnInterval(
rel, this, columnIndex, filterArg);
} catch (JaninoRelMetadataProvider.NoHandler e) {
filteredColumnInterval =
revise(e.relClass, FlinkMetadata.FilteredColumnInterval.DEF);
}
}
} | 3.68 |
streampipes_MD5_crypt | /**
 * Computes the MD5 digest of a string and returns it as a lowercase hex string.
 *
 * @param str String to hash
 * @return Hex-encoded MD5 digest of the input
*/
public static String crypt(String str) {
if (str == null || str.length() == 0) {
throw new IllegalArgumentException("String to encrypt cannot be null or zero length");
}
StringBuilder hexString = new StringBuilder();
try {
MessageDigest md = MessageDigest.getInstance("MD5");
md.update(str.getBytes());
byte[] hash = md.digest();
for (byte b : hash) {
if ((0xff & b) < 0x10) {
hexString.append("0").append(Integer.toHexString((0xFF & b)));
} else {
hexString.append(Integer.toHexString(0xFF & b));
}
}
} catch (NoSuchAlgorithmException e) {
e.printStackTrace();
}
return hexString.toString();
} | 3.68 |
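A small usage sketch for the helper above; it assumes the MD5 class shown is on the classpath (its package is omitted here) and simply demonstrates that crypt returns a 32-character, zero-padded, lowercase hex digest.

public class Md5Example {
    public static void main(String[] args) {
        String digest = MD5.crypt("hello world");
        System.out.println(digest);          // lowercase hex digest
        System.out.println(digest.length()); // 32
    }
}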
hbase_BackupManifest_addDependentImage | /**
* Add dependent backup image for this backup.
* @param image The direct dependent backup image
*/
public void addDependentImage(BackupImage image) {
this.backupImage.addAncestor(image);
} | 3.68 |
hbase_RequestControllerFactory_create | /**
* Constructs a {@link org.apache.hadoop.hbase.client.RequestController}.
* @param conf The {@link Configuration} to use.
* @return A RequestController which is built according to the configuration.
*/
public static RequestController create(Configuration conf) {
Class<? extends RequestController> clazz = conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY,
SimpleRequestController.class, RequestController.class);
return ReflectionUtils.newInstance(clazz, conf);
} | 3.68 |
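A hedged wiring sketch: it assumes REQUEST_CONTROLLER_IMPL_CONF_KEY is the public constant referenced above and that hbase-client is on the classpath; MyThrottlingController in the commented line is a hypothetical custom implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RequestController;
import org.apache.hadoop.hbase.client.RequestControllerFactory;

public class RequestControllerWiring {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // To swap in a custom implementation (hypothetical class):
        // conf.setClass(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
        //     MyThrottlingController.class, RequestController.class);
        RequestController controller = RequestControllerFactory.create(conf);
        // Without the override, this prints the SimpleRequestController default.
        System.out.println("Using " + controller.getClass().getName());
    }
}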
hmily_HmilyUndoContext_getHmilyLocks | /**
* Get hmily locks.
*
* @return hmily locks
*/
public Collection<HmilyLock> getHmilyLocks() {
return dataSnapshot.getTuples().stream()
.map(tuple -> new HmilyLock(transId, participantId, resourceId, tuple.getTableName(), Joiner.on("_").join(tuple.getPrimaryKeyValues()))).collect(Collectors.toList());
} | 3.68 |
framework_LegacyWindow_addComponent | /**
* Adds a component to this UI. The component is not added directly to the
* UI, but instead to the content container ({@link #getContent()}).
*
* This method should only be called when the content is a
* {@link ComponentContainer} (default {@link VerticalLayout} or explicitly
* set).
*
* @param component
* the component to add to this UI
*
* @see #getContent()
*/
public void addComponent(Component component) {
getContent().addComponent(component);
} | 3.68 |
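A hedged sketch, assuming the Vaadin 7 compatibility classes LegacyApplication and UI.LegacyWindow; the caption and label text are illustrative.

import com.vaadin.server.LegacyApplication;
import com.vaadin.ui.Label;
import com.vaadin.ui.UI.LegacyWindow;

// addComponent() puts the label into the window's default content layout,
// not directly into the UI.
public class HelloLegacyApplication extends LegacyApplication {
    @Override
    public void init() {
        LegacyWindow main = new LegacyWindow("Main window");
        main.addComponent(new Label("Hello from the legacy API"));
        setMainWindow(main);
    }
}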
flink_ExpressionBuilder_aggDecimalPlus | /**
* Used only for implementing SUM/AVG aggregations (with and without retractions) on a Decimal
* type to avoid overriding decimal precision/scale calculation for sum/avg with the rules
* applied for the normal plus.
*/
@Internal
public static UnresolvedCallExpression aggDecimalPlus(Expression input1, Expression input2) {
return call(AGG_DECIMAL_PLUS, input1, input2);
} | 3.68 |
morf_AnalyseTable_apply | /**
* Before an upgrade step is run, if the table for analysis is not present, an illegal argument exception is thrown
* to prevent the upgrade step from starting.
*/
@Override
public Schema apply(Schema schema) {
if (!schema.tableExists(tableName.toUpperCase())) {
throw new IllegalArgumentException("Cannot analyse table [" + tableName + "] as it does not exist.");
}
return schema;
} | 3.68 |
hudi_TimelineUtils_getAffectedPartitions | /**
   * Returns the partitions modified by completed instants in the passed timeline, including those touched by internal operations such as clean.
*/
public static List<String> getAffectedPartitions(HoodieTimeline timeline) {
return timeline.filterCompletedInstants().getInstantsAsStream().flatMap(s -> {
switch (s.getAction()) {
case COMMIT_ACTION:
case DELTA_COMMIT_ACTION:
try {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(s).get(), HoodieCommitMetadata.class);
return commitMetadata.getPartitionToWriteStats().keySet().stream();
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions written at " + s, e);
}
case REPLACE_COMMIT_ACTION:
try {
HoodieReplaceCommitMetadata commitMetadata = HoodieReplaceCommitMetadata.fromBytes(
timeline.getInstantDetails(s).get(), HoodieReplaceCommitMetadata.class);
Set<String> partitions = new HashSet<>();
partitions.addAll(commitMetadata.getPartitionToReplaceFileIds().keySet());
partitions.addAll(commitMetadata.getPartitionToWriteStats().keySet());
return partitions.stream();
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions modified at " + s, e);
}
case HoodieTimeline.CLEAN_ACTION:
try {
HoodieCleanMetadata cleanMetadata = TimelineMetadataUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(s).get());
return cleanMetadata.getPartitionMetadata().keySet().stream();
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions cleaned at " + s, e);
}
case HoodieTimeline.ROLLBACK_ACTION:
try {
return TimelineMetadataUtils.deserializeHoodieRollbackMetadata(timeline.getInstantDetails(s).get()).getPartitionMetadata().keySet().stream();
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions rolledback at " + s, e);
}
case HoodieTimeline.RESTORE_ACTION:
try {
HoodieRestoreMetadata restoreMetadata = TimelineMetadataUtils.deserializeAvroMetadata(timeline.getInstantDetails(s).get(), HoodieRestoreMetadata.class);
return restoreMetadata.getHoodieRestoreMetadata().values().stream()
.flatMap(Collection::stream)
.flatMap(rollbackMetadata -> rollbackMetadata.getPartitionMetadata().keySet().stream());
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions restored at " + s, e);
}
case HoodieTimeline.SAVEPOINT_ACTION:
try {
return TimelineMetadataUtils.deserializeHoodieSavepointMetadata(timeline.getInstantDetails(s).get()).getPartitionMetadata().keySet().stream();
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions savepoint at " + s, e);
}
case HoodieTimeline.COMPACTION_ACTION:
// compaction is not a completed instant. So no need to consider this action.
return Stream.empty();
default:
throw new HoodieIOException("unknown action in timeline " + s.getAction());
}
}).distinct().filter(s -> !s.isEmpty()).collect(Collectors.toList());
} | 3.68 |
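A hedged usage sketch; the HoodieTableMetaClient builder calls and the TimelineUtils package follow recent Hudi releases but vary across versions, and the base path is a placeholder. The point is that the helper takes a timeline (here the table's active timeline) and returns the distinct partition paths touched by its completed instants.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineUtils;

public class AffectedPartitionsExample {
    public static void main(String[] args) {
        HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder()
            .setConf(new Configuration())            // Hadoop configuration
            .setBasePath("file:///tmp/hudi_table")   // placeholder table path
            .build();
        HoodieTimeline timeline = metaClient.getActiveTimeline();
        List<String> partitions = TimelineUtils.getAffectedPartitions(timeline);
        partitions.forEach(System.out::println);
    }
}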
flink_ExtractionUtils_collectAnnotationsOfClass | /**
* Collects all annotations of the given type defined in the current class or superclasses.
* Duplicates are ignored.
*/
static <T extends Annotation> Set<T> collectAnnotationsOfClass(
Class<T> annotation, Class<?> annotatedClass) {
final List<Class<?>> classHierarchy = new ArrayList<>();
Class<?> currentClass = annotatedClass;
while (currentClass != null) {
classHierarchy.add(currentClass);
currentClass = currentClass.getSuperclass();
}
// convert to top down
Collections.reverse(classHierarchy);
return classHierarchy.stream()
.flatMap(c -> Stream.of(c.getAnnotationsByType(annotation)))
.collect(Collectors.toCollection(LinkedHashSet::new));
} | 3.68 |
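A self-contained sketch of the same top-down collection behaviour, since the helper above is package-private; the @Tag/@Tags annotations and the Base/Sub hierarchy are made up for illustration.

import java.lang.annotation.Annotation;
import java.lang.annotation.Repeatable;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class AnnotationCollectionSketch {

    @Retention(RetentionPolicy.RUNTIME)
    @Repeatable(Tags.class)
    @interface Tag { String value(); }

    @Retention(RetentionPolicy.RUNTIME)
    @interface Tags { Tag[] value(); }

    @Tag("base")
    static class Base {}

    @Tag("sub")
    static class Sub extends Base {}

    // Same approach as collectAnnotationsOfClass: walk up the hierarchy, then emit top down.
    static <T extends Annotation> Set<T> collect(Class<T> annotation, Class<?> annotatedClass) {
        final List<Class<?>> classHierarchy = new ArrayList<>();
        Class<?> currentClass = annotatedClass;
        while (currentClass != null) {
            classHierarchy.add(currentClass);
            currentClass = currentClass.getSuperclass();
        }
        Collections.reverse(classHierarchy);
        return classHierarchy.stream()
            .flatMap(c -> Stream.of(c.getAnnotationsByType(annotation)))
            .collect(Collectors.toCollection(LinkedHashSet::new));
    }

    public static void main(String[] args) {
        // Prints "base" then "sub": superclass annotations come first.
        collect(Tag.class, Sub.class).forEach(t -> System.out.println(t.value()));
    }
}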