name (string, lengths 12 to 178) | code_snippet (string, lengths 8 to 36.5k) | score (float64, 3.26 to 3.68)
---|---|---|
rocketmq-connect_DebeziumMysqlConnector_getConnectorClass | /**
* Gets the connector class.
*/
@Override
public String getConnectorClass() {
return DEFAULT_CONNECTOR;
} | 3.68 |
framework_BasicEventProvider_getEvents | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.event.CalendarEventProvider#getEvents(java.
* util.Date, java.util.Date)
*/
@Override
public List<CalendarEvent> getEvents(Date startDate, Date endDate) {
List<CalendarEvent> activeEvents = new ArrayList<CalendarEvent>();
for (CalendarEvent ev : eventList) {
long from = startDate.getTime();
long to = endDate.getTime();
if (ev.getStart() != null && ev.getEnd() != null) {
long f = ev.getStart().getTime();
long t = ev.getEnd().getTime();
// Select only events that overlap with startDate and
// endDate.
if ((f <= to && f >= from) || (t >= from && t <= to)
|| (f <= from && t >= to)) {
activeEvents.add(ev);
}
}
}
return activeEvents;
} | 3.68 |
framework_Table_getColumnExpandRatio | /**
* Gets the column expand ratio for a column. See
* {@link #setColumnExpandRatio(Object, float)}
*
* @param propertyId
* columns property id
* @return the expandRatio used to divide excess space for this column
*/
public float getColumnExpandRatio(Object propertyId) {
final Float width = columnExpandRatios.get(propertyId);
if (width == null) {
return -1;
}
return width.floatValue();
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_generateMissingFavoredNodeMultiRack | /*
* Generates a missing FN based on the input favoredNodes and the nodes to be skipped. Gets the
* current rack layout of the favored nodes and the nodes to be excluded, then picks a random node
* consistent with HDFS block placement. E.g. if the existing nodes are all on one rack, generate
* one from another rack. We exclude as much as possible so the random selection has a better
* chance of generating a node within a few iterations, ideally one.
*/
private ServerName generateMissingFavoredNodeMultiRack(List<ServerName> favoredNodes,
List<ServerName> excludeNodes) throws IOException {
Set<String> racks = Sets.newHashSet();
Map<String, Set<ServerName>> rackToFNMapping = new HashMap<>();
// Let's understand the current rack distribution of the FNs
for (ServerName sn : favoredNodes) {
String rack = getRackOfServer(sn);
racks.add(rack);
Set<ServerName> serversInRack = rackToFNMapping.get(rack);
if (serversInRack == null) {
serversInRack = Sets.newHashSet();
rackToFNMapping.put(rack, serversInRack);
}
serversInRack.add(sn);
}
// What racks should be skipped while getting a FN?
Set<String> skipRackSet = Sets.newHashSet();
/*
* If both FNs are from the same rack, then we don't want to generate another FN on the same
* rack. If that rack fails, the region would be unavailable.
*/
if (racks.size() == 1 && favoredNodes.size() > 1) {
skipRackSet.add(racks.iterator().next());
}
/*
* If there are no free nodes on the existing racks, we should skip those racks too. We can
* reduce the number of iterations for FN selection.
*/
for (String rack : racks) {
if (
getServersFromRack(rack) != null
&& rackToFNMapping.get(rack).size() == getServersFromRack(rack).size()
) {
skipRackSet.add(rack);
}
}
Set<ServerName> favoredNodeSet = Sets.newHashSet(favoredNodes);
if (excludeNodes != null && excludeNodes.size() > 0) {
favoredNodeSet.addAll(excludeNodes);
}
/*
* Let's get a random rack by excluding skipRackSet and generate a random FN from that rack.
*/
int i = 0;
Set<String> randomRacks = Sets.newHashSet();
ServerName newServer = null;
do {
String randomRack = this.getOneRandomRack(skipRackSet);
newServer = this.getOneRandomServer(randomRack, favoredNodeSet);
randomRacks.add(randomRack);
i++;
} while ((i < MAX_ATTEMPTS_FN_GENERATION) && (newServer == null));
if (newServer == null) {
if (LOG.isTraceEnabled()) {
LOG.trace(String.format(
"Unable to generate additional favored nodes for %s after "
+ "considering racks %s and skip rack %s with a unique rack list of %s and rack "
+ "to RS map of %s and RS to rack map of %s",
StringUtils.join(favoredNodes, ","), randomRacks, skipRackSet, uniqueRackList,
rackToRegionServerMap, regionServerToRackMap));
}
throw new IOException(
" Unable to generate additional favored nodes for " + StringUtils.join(favoredNodes, ","));
}
return newServer;
} | 3.68 |
hbase_SecureBulkLoadManager_isFile | /**
* Check if the path is referencing a file. This is mainly needed to avoid symlinks.
* @return true if p is a file
*/
private boolean isFile(Path p) throws IOException {
FileStatus status = srcFs.getFileStatus(p);
boolean isFile = !status.isDirectory();
try {
isFile =
isFile && !(Boolean) Methods.call(FileStatus.class, status, "isSymlink", null, null);
} catch (Exception e) {
}
return isFile;
} | 3.68 |
flink_RawFormatDeserializationSchema_createConverter | /** Creates a runtime converter. */
private static DeserializationRuntimeConverter createConverter(
LogicalType type, String charsetName, boolean isBigEndian) {
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
return createStringConverter(charsetName);
case VARBINARY:
case BINARY:
return data -> data;
case RAW:
return RawValueData::fromBytes;
case BOOLEAN:
return data -> data[0] != 0;
case TINYINT:
return data -> data[0];
case SMALLINT:
return createEndiannessAwareConverter(
isBigEndian,
segment -> segment.getShortBigEndian(0),
segment -> segment.getShortLittleEndian(0));
case INTEGER:
return createEndiannessAwareConverter(
isBigEndian,
segment -> segment.getIntBigEndian(0),
segment -> segment.getIntLittleEndian(0));
case BIGINT:
return createEndiannessAwareConverter(
isBigEndian,
segment -> segment.getLongBigEndian(0),
segment -> segment.getLongLittleEndian(0));
case FLOAT:
return createEndiannessAwareConverter(
isBigEndian,
segment -> segment.getFloatBigEndian(0),
segment -> segment.getFloatLittleEndian(0));
case DOUBLE:
return createEndiannessAwareConverter(
isBigEndian,
segment -> segment.getDoubleBigEndian(0),
segment -> segment.getDoubleLittleEndian(0));
default:
throw new UnsupportedOperationException(
"'raw' format currently doesn't support type: " + type);
}
} | 3.68 |
flink_SingleInputGate_setupChannels | /** Assign the exclusive buffers to all remote input channels directly for credit-based mode. */
@VisibleForTesting
public void setupChannels() throws IOException {
// Allocate enough exclusive and floating buffers to guarantee that job can make progress.
// Note: An exception will be thrown if there is no buffer available in the given timeout.
// First allocate a single floating buffer to avoid potential deadlock when the exclusive
// buffer is 0. See FLINK-24035 for more information.
bufferPool.reserveSegments(1);
// Next allocate the exclusive buffers per channel when the number of exclusive buffer is
// larger than 0.
synchronized (requestLock) {
for (InputChannel inputChannel : inputChannels.values()) {
inputChannel.setup();
}
}
} | 3.68 |
morf_AbstractSelectStatementBuilder_where | /**
* Specifies the where criteria. For use in code where the criteria are being generated dynamically.
* The iterable can be empty but not null.
*
* @param criteria the criteria to filter the results by. They will be <i>AND</i>ed together.
* @return this, for method chaining.
*/
public T where(Iterable<Criterion> criteria) {
if (criteria == null) {
throw new IllegalArgumentException("No criterion was given in the where clause");
}
if (!Iterables.isEmpty(criteria)) {
whereCriterion = new Criterion(Operator.AND, criteria);
}
return castToChild(this);
} | 3.68 |
hbase_HBaseTestingUtility_predicateNoRegionsInTransition | /**
* Returns a {@link Predicate} for checking that there are no regions in transition in master
*/
public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
return new ExplainingPredicate<IOException>() {
@Override
public String explainFailure() throws IOException {
final RegionStates regionStates =
getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
return "found in transition: " + regionStates.getRegionsInTransition().toString();
}
@Override
public boolean evaluate() throws IOException {
HMaster master = getMiniHBaseCluster().getMaster();
if (master == null) return false;
AssignmentManager am = master.getAssignmentManager();
if (am == null) return false;
return !am.hasRegionsInTransition();
}
};
} | 3.68 |
hbase_BitComparator_toByteArray | /** Returns The comparator serialized using pb */
@Override
public byte[] toByteArray() {
ComparatorProtos.BitComparator.Builder builder = ComparatorProtos.BitComparator.newBuilder();
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
ComparatorProtos.BitComparator.BitwiseOp bitwiseOpPb =
ComparatorProtos.BitComparator.BitwiseOp.valueOf(bitOperator.name());
builder.setBitwiseOp(bitwiseOpPb);
return builder.build().toByteArray();
} | 3.68 |
morf_ViewURLAsFile_downloadFileFromHttpUrl | /**
* Downloads a file over HTTP/HTTPS.
*
* @param url The url to download from.
* @param urlUsername The username for the url.
* @param urlPassword The password for the url.
* @param file The file to populate from the download.
*/
private void downloadFileFromHttpUrl(final URL url, final String urlUsername, final String urlPassword, File file) {
// -- Create connection to URL...
//
URLConnection urlConnection;
try {
urlConnection = url.openConnection();
} catch (IOException e) {
throw new RuntimeException("Error opening connection to URL [" + url + "]", e);
}
// -- Set up authentication if required...
//
if (urlUsername != null && urlPassword != null) {
String userpass = urlUsername + ":" + urlPassword;
String basicAuth = "Basic " + new String(Base64.getEncoder().encode(userpass.getBytes(StandardCharsets.UTF_8)), Charsets.US_ASCII);
urlConnection.setRequestProperty("Authorization", basicAuth);
}
// -- Download the file...
//
try (
ReadableByteChannel readableByteChannel = Channels.newChannel(urlConnection.getInputStream());
FileOutputStream fileOutputStream = new FileOutputStream(file);
FileChannel fileChannel = fileOutputStream.getChannel()
) {
fileChannel.transferFrom(readableByteChannel, 0, Long.MAX_VALUE);
log.debug("Successfully downloaded file [" + url + "] to temp file [" + file + "]");
} catch (IOException e) {
throw new RuntimeException("Error downloading data set from [" + url + "]", e);
}
} | 3.68 |
hbase_MobUtils_getMobColumnFamilies | /**
* Get list of Mob column families (if any exists)
* @param htd table descriptor
* @return list of Mob column families
*/
public static List<ColumnFamilyDescriptor> getMobColumnFamilies(TableDescriptor htd) {
List<ColumnFamilyDescriptor> fams = new ArrayList<ColumnFamilyDescriptor>();
ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
for (ColumnFamilyDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
fams.add(hcd);
}
}
return fams;
} | 3.68 |
hadoop_FederationCache_buildPolicyConfigMap | /**
* Builds a PolicyConfigMap from the given cacheRequest.
*
* @param cacheRequest CacheRequest.
* @return PolicyConfigMap.
*/
public static Map<String, SubClusterPolicyConfiguration> buildPolicyConfigMap(
CacheRequest<String, ?> cacheRequest){
Object value = cacheRequest.value;
SubClusterPolicyConfigurationCacheResponse response =
SubClusterPolicyConfigurationCacheResponse.class.cast(value);
List<SubClusterPolicyConfiguration> subClusters = response.getList();
return buildPolicyConfigMap(subClusters);
} | 3.68 |
graphhopper_CarAccessParser_isForwardOneway | /**
* Make sure that isOneway is called before this method.
*/
protected boolean isForwardOneway(ReaderWay way) {
return !way.hasTag("oneway", "-1")
&& !way.hasTag("vehicle:forward", restrictedValues)
&& !way.hasTag("motor_vehicle:forward", restrictedValues);
} | 3.68 |
rocketmq-connect_MetricUtils_metricNameToString | /**
* Converts a MetricName to its string representation.
*
* @param name the metric name
* @return the string form of the metric name
*/
public static String metricNameToString(MetricName name) {
if (StringUtils.isEmpty(name.getType())) {
name.setType("none");
}
StringBuilder sb = new StringBuilder(ROCKETMQ_CONNECT)
.append(name.getGroup())
.append(SPLIT_COMMA)
.append(name.getName())
.append(SPLIT_COMMA)
.append(name.getType());
for (Map.Entry<String, String> entry : name.getTags().entrySet()) {
sb.append(SPLIT_COMMA)
.append(entry.getKey())
.append(SPLIT_KV)
.append(entry.getValue());
}
return sb.toString();
} | 3.68 |
hbase_RestoreTool_getRegionList | /**
* Gets region list
* @param tableArchivePath table archive path
* @return RegionList region list
* @throws IOException exception
*/
ArrayList<Path> getRegionList(Path tableArchivePath) throws IOException {
ArrayList<Path> regionDirList = new ArrayList<>();
FileStatus[] children = fs.listStatus(tableArchivePath);
for (FileStatus childStatus : children) {
// here child refer to each region(Name)
Path child = childStatus.getPath();
regionDirList.add(child);
}
return regionDirList;
} | 3.68 |
flink_Conditions_haveLeafReturnTypes | /**
* Tests leaf return types of a method against the given predicate.
*
* <p>See {@link #haveLeafTypes(DescribedPredicate)} for details.
*/
public static ArchCondition<JavaMethod> haveLeafReturnTypes(
DescribedPredicate<JavaClass> typePredicate) {
return new ArchCondition<JavaMethod>(
"have leaf return types" + typePredicate.getDescription()) {
@Override
public void check(JavaMethod method, ConditionEvents events) {
for (JavaClass leafType : getLeafTypes(method.getReturnType())) {
if (!isJavaClass(leafType)) {
continue;
}
if (!typePredicate.test(leafType)) {
final String message =
String.format(
"%s: Returned leaf type %s does not satisfy: %s",
method.getFullName(),
leafType.getName(),
typePredicate.getDescription());
events.add(SimpleConditionEvent.violated(method, message));
}
}
}
};
} | 3.68 |
querydsl_BeanMap_getType | /**
* Returns the type of the property with the given name.
*
* @param name the name of the property
* @return the type of the property, or {@code null} if no such
* property exists
*/
public Class<?> getType(String name) {
return types.get(name);
} | 3.68 |
flink_ModuleFactory_createModule | /** Creates and configures a {@link Module}. */
default Module createModule(Context context) {
throw new ModuleException("Module factories must implement createModule(Context).");
} | 3.68 |
hbase_HRegion_getNextSequenceId | /**
* Method to safely get the next sequence number.
* @return Next sequence number unassociated with any actual edit.
*/
protected long getNextSequenceId(final WAL wal) throws IOException {
WriteEntry we = mvcc.begin();
mvcc.completeAndWait(we);
return we.getWriteNumber();
} | 3.68 |
hbase_HRegion_sawWrongRegion | /**
* Records that a {@link WrongRegionException} has been observed.
*/
void sawWrongRegion() {
wrongRegion = true;
} | 3.68 |
AreaShop_CommandAreaShop_canExecute | /**
* Check if this Command instance can execute the given command and arguments.
* @param command The command to check for execution
* @param args The arguments to check
* @return true if it can execute the command, false otherwise
*/
public boolean canExecute(Command command, String[] args) {
String commandString = command.getName() + " " + StringUtils.join(args, " ");
if(commandString.length() > getCommandStart().length()) {
return commandString.toLowerCase().startsWith(getCommandStart().toLowerCase() + " ");
}
return commandString.toLowerCase().startsWith(getCommandStart().toLowerCase());
} | 3.68 |
framework_Table_setContainerDataSource | /**
* Sets the container data source and the columns that will be visible.
* Columns are shown in the collection's iteration order.
* <p>
* Keeps propertyValueConverters if the corresponding id exists in the new
* data source and is of a compatible type.
* </p>
*
* @see Table#setContainerDataSource(Container)
* @see Table#setVisibleColumns(Object[])
* @see Table#setConverter(Object, Converter<String, ?>)
*
* @param newDataSource
* the new data source.
* @param visibleIds
* IDs of the visible columns
*/
public void setContainerDataSource(Container newDataSource,
Collection<?> visibleIds) {
disableContentRefreshing();
if (newDataSource == null) {
newDataSource = new IndexedContainer();
}
if (visibleIds == null) {
visibleIds = new ArrayList<Object>();
}
// Retain propertyValueConverters if their corresponding ids are
// properties of the new
// data source and are of a compatible type
if (propertyValueConverters != null) {
Collection<?> newPropertyIds = newDataSource
.getContainerPropertyIds();
LinkedList<Object> retainableValueConverters = new LinkedList<Object>();
for (Object propertyId : newPropertyIds) {
Converter<String, ?> converter = getConverter(propertyId);
if (converter != null) {
if (typeIsCompatible(converter.getModelType(),
newDataSource.getType(propertyId))) {
retainableValueConverters.add(propertyId);
}
}
}
propertyValueConverters.keySet()
.retainAll(retainableValueConverters);
}
// Assures that the data source is ordered by making unordered
// containers ordered by wrapping them
if (newDataSource instanceof Container.Ordered) {
super.setContainerDataSource(newDataSource);
} else {
super.setContainerDataSource(
new ContainerOrderedWrapper(newDataSource));
}
// Resets page position
currentPageFirstItemId = null;
currentPageFirstItemIndex = 0;
// Resets column properties
if (collapsedColumns != null) {
collapsedColumns.clear();
}
// don't add the same id twice
Collection<Object> col = new LinkedList<Object>();
for (Object id : visibleIds) {
if (!col.contains(id)) {
col.add(id);
}
}
setVisibleColumns(col.toArray());
// Assure visual refresh
resetPageBuffer();
enableContentRefreshing(true);
} | 3.68 |
hadoop_IngressPortBasedResolver_getServerProperties | /**
* Identify the Sasl Properties to be used for a connection with a client.
* @param clientAddress client's address
* @param ingressPort the port that the client is connecting
* @return the sasl properties to be used for the connection.
*/
@Override
@VisibleForTesting
public Map<String, String> getServerProperties(InetAddress clientAddress,
int ingressPort) {
LOG.debug("Resolving SASL properties for " + clientAddress + " "
+ ingressPort);
if (!portPropMapping.containsKey(ingressPort)) {
LOG.warn("An un-configured port is being requested " + ingressPort
+ " using default");
return getDefaultProperties();
}
return portPropMapping.get(ingressPort);
} | 3.68 |
pulsar_ModularLoadManagerImpl_updateBundleUnloadingMetrics | /**
* As leader broker, update bundle unloading metrics.
*
* @param bundlesToUnload
*/
private void updateBundleUnloadingMetrics(Multimap<String, String> bundlesToUnload) {
unloadBrokerCount += bundlesToUnload.keySet().size();
unloadBundleCount += bundlesToUnload.values().size();
List<Metrics> metrics = new ArrayList<>();
Map<String, String> dimensions = new HashMap<>();
dimensions.put("metric", "bundleUnloading");
Metrics m = Metrics.create(dimensions);
m.put("brk_lb_unload_broker_total", unloadBrokerCount);
m.put("brk_lb_unload_bundle_total", unloadBundleCount);
metrics.add(m);
this.bundleUnloadMetrics.set(metrics);
} | 3.68 |
flink_PartitionLoader_commitPartition | /**
* Reuse of PartitionCommitPolicy mechanisms. The default in Batch mode is metastore and
* success-file.
*/
private void commitPartition(LinkedHashMap<String, String> partitionSpec, Path path)
throws Exception {
PartitionCommitPolicy.Context context = new CommitPolicyContextImpl(partitionSpec, path);
for (PartitionCommitPolicy policy : policies) {
if (policy instanceof MetastoreCommitPolicy) {
if (partitionSpec.isEmpty()) {
// Non partition table skip commit meta data.
continue;
}
((MetastoreCommitPolicy) policy).setMetastore(metaStore);
}
policy.commit(context);
}
} | 3.68 |
graphhopper_GTFSFeed_getInterpolatedStopTimesForTrip | /**
* For the given trip ID, fetch all the stop times in order, and interpolate stop-to-stop travel times.
*/
public Iterable<StopTime> getInterpolatedStopTimesForTrip (String trip_id) throws FirstAndLastStopsDoNotHaveTimes {
// clone stop times so as not to modify base GTFS structures
StopTime[] stopTimes = StreamSupport.stream(getOrderedStopTimesForTrip(trip_id).spliterator(), false)
.map(st -> st.clone())
.toArray(i -> new StopTime[i]);
// return early so we don't have to check the array length below.
if (stopTimes.length == 0) return Collections.emptyList();
// first pass: set all partially filled stop times
for (StopTime st : stopTimes) {
if (st.arrival_time != Entity.INT_MISSING && st.departure_time == Entity.INT_MISSING) {
st.departure_time = st.arrival_time;
}
if (st.arrival_time == Entity.INT_MISSING && st.departure_time != Entity.INT_MISSING) {
st.arrival_time = st.departure_time;
}
}
// quick check: ensure that first and last stops have times.
// technically GTFS requires that both arrival_time and departure_time be filled at both the first and last stop,
// but we are slightly more lenient and only insist that one of them be filled at both the first and last stop.
// The meaning of the first stop's arrival time is unclear, and same for the last stop's departure time (except
// in the case of interlining).
// it's fine to just check departure time, as the above pass ensures that all stop times have either both
// arrival and departure times, or neither
if (stopTimes[0].departure_time == Entity.INT_MISSING || stopTimes[stopTimes.length - 1].departure_time == Entity.INT_MISSING) {
throw new FirstAndLastStopsDoNotHaveTimes();
}
// second pass: fill complete stop times
int startOfInterpolatedBlock = -1;
for (int stopTime = 0; stopTime < stopTimes.length; stopTime++) {
if (stopTimes[stopTime].departure_time == Entity.INT_MISSING && startOfInterpolatedBlock == -1) {
startOfInterpolatedBlock = stopTime;
}
else if (stopTimes[stopTime].departure_time != Entity.INT_MISSING && startOfInterpolatedBlock != -1) {
// we have found the end of the interpolated section
int nInterpolatedStops = stopTime - startOfInterpolatedBlock;
double totalLengthOfInterpolatedSection = 0;
double[] lengthOfInterpolatedSections = new double[nInterpolatedStops];
for (int stopTimeToInterpolate = startOfInterpolatedBlock, i = 0; stopTimeToInterpolate < stopTime; stopTimeToInterpolate++, i++) {
Stop start = stops.get(stopTimes[stopTimeToInterpolate - 1].stop_id);
Stop end = stops.get(stopTimes[stopTimeToInterpolate].stop_id);
double segLen = fastDistance(start.stop_lat, start.stop_lon, end.stop_lat, end.stop_lon);
totalLengthOfInterpolatedSection += segLen;
lengthOfInterpolatedSections[i] = segLen;
}
// add the segment post-last-interpolated-stop
Stop start = stops.get(stopTimes[stopTime - 1].stop_id);
Stop end = stops.get(stopTimes[stopTime].stop_id);
totalLengthOfInterpolatedSection += fastDistance(start.stop_lat, start.stop_lon, end.stop_lat, end.stop_lon);
int departureBeforeInterpolation = stopTimes[startOfInterpolatedBlock - 1].departure_time;
int arrivalAfterInterpolation = stopTimes[stopTime].arrival_time;
int totalTime = arrivalAfterInterpolation - departureBeforeInterpolation;
double lengthSoFar = 0;
for (int stopTimeToInterpolate = startOfInterpolatedBlock, i = 0; stopTimeToInterpolate < stopTime; stopTimeToInterpolate++, i++) {
lengthSoFar += lengthOfInterpolatedSections[i];
int time = (int) (departureBeforeInterpolation + totalTime * (lengthSoFar / totalLengthOfInterpolatedSection));
stopTimes[stopTimeToInterpolate].arrival_time = stopTimes[stopTimeToInterpolate].departure_time = time;
}
// we're done with this block
startOfInterpolatedBlock = -1;
}
}
return Arrays.asList(stopTimes);
} | 3.68 |
hbase_StoreFileWriter_getGeneralBloomWriter | /**
* For unit testing only.
* @return the Bloom filter used by this writer.
*/
BloomFilterWriter getGeneralBloomWriter() {
return generalBloomFilterWriter;
} | 3.68 |
hbase_SnappyCodec_isLoaded | /** Return true if the native shared libraries were loaded; false otherwise. */
public static boolean isLoaded() {
return loaded;
} | 3.68 |
hbase_QuotaFilter_setRegionServerFilter | /**
* Set the region server filter regex
* @param regex the region server filter
* @return the quota filter object
*/
public QuotaFilter setRegionServerFilter(final String regex) {
this.regionServerRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.68 |
framework_BrowserInfo_getWebkitVersion | /**
* Returns the WebKit version if the browser is WebKit based. The WebKit
* version returned is the major version e.g., 523.
*
* @return The WebKit version or -1 if the browser is not WebKit based
*/
public float getWebkitVersion() {
if (!browserDetails.isWebKit()) {
return -1;
}
return browserDetails.getBrowserEngineVersion();
} | 3.68 |
flink_SingleInputPlanNode_getPredecessor | /**
* Gets the predecessor of this node, i.e. the source of the input channel.
*
* @return The predecessor of this node.
*/
public PlanNode getPredecessor() {
return this.input.getSource();
} | 3.68 |
hbase_RegionNormalizerManager_isNormalizerOn | /**
* Return {@code true} if region normalizer is on, {@code false} otherwise
*/
public boolean isNormalizerOn() {
return regionNormalizerStateStore.get();
} | 3.68 |
morf_Function_count | /**
* Helper method to create an instance of the "count" SQL function.
*
* @param field the field to evaluate in the count function.
*
* @return an instance of a count function
*/
public static Function count(AliasedField field) {
return new Function(FunctionType.COUNT, field);
} | 3.68 |
hadoop_BondedS3AStatisticsContext_newDelegationTokenStatistics | /**
* Create a delegation token statistics instance.
* @return an instance of delegation token statistics
*/
@Override
public DelegationTokenStatistics newDelegationTokenStatistics() {
return getInstrumentation().newDelegationTokenStatistics();
} | 3.68 |
hadoop_BigDecimalSplitter_tryDivide | /**
* Divide numerator by denominator. If impossible in exact mode, use rounding.
*/
protected BigDecimal tryDivide(BigDecimal numerator, BigDecimal denominator) {
try {
return numerator.divide(denominator);
} catch (ArithmeticException ae) {
return numerator.divide(denominator, BigDecimal.ROUND_HALF_UP);
}
} | 3.68 |
hadoop_TypedBytesInput_readRawBool | /**
* Reads the raw bytes following a <code>Type.BOOL</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawBool() throws IOException {
byte[] bytes = new byte[2];
bytes[0] = (byte) Type.BOOL.code;
in.readFully(bytes, 1, 1);
return bytes;
} | 3.68 |
framework_ExpandingContainer_addItemAfter | /**
* @throws UnsupportedOperationException
* always
*/
@Override
public Item addItemAfter(Object previousItemId, Object newItemId) {
throw new UnsupportedOperationException();
} | 3.68 |
framework_VComboBox_actOnEnteredValueAfterFiltering | /**
* Create/select a suggestion based on the used entered string. This
* method is called after filtering has completed with the given string.
*
* @param enteredItemValue
* user entered string
*/
public void actOnEnteredValueAfterFiltering(String enteredItemValue) {
debug("VComboBox.SM: doPostFilterSelectedItemAction()");
final MenuItem item = getSelectedItem();
boolean handledOnServer = handledNewItem == enteredItemValue;
if (handledOnServer) {
// clear value to mark it as handled
handledNewItem = null;
}
// check for exact match in menu
int p = getItems().size();
if (p > 0) {
for (int i = 0; i < p; i++) {
final MenuItem potentialExactMatch = getItems().get(i);
if (potentialExactMatch.getText()
.equals(enteredItemValue)) {
selectItem(potentialExactMatch);
// do not send a value change event if null was and
// stays selected
if (!"".equals(enteredItemValue)
|| selectedOptionKey != null
&& !selectedOptionKey.isEmpty()) {
doItemAction(potentialExactMatch, true);
}
suggestionPopup.hide();
lastNewItemString = null;
connector.clearNewItemHandlingIfMatch(enteredItemValue);
return;
}
}
}
if (!handledOnServer && "".equals(enteredItemValue)
&& nullSelectionAllowed) {
onNullSelected();
} else if (!handledOnServer && allowNewItems) {
if (!enteredItemValue.equals(lastNewItemString)) {
// Store last sent new item string to avoid double sends
lastNewItemString = enteredItemValue;
connector.sendNewItem(enteredItemValue);
// TODO try to select the new value if it matches what was
// sent for V7 compatibility
}
} else if (item != null && !"".equals(lastFilter)
&& item.getText().toLowerCase(Locale.ROOT)
.contains(lastFilter.toLowerCase(Locale.ROOT))) {
doItemAction(item, true);
} else {
// currentSuggestion has key="" for nullselection
if (currentSuggestion != null
&& !currentSuggestion.key.isEmpty()) {
// An item (not null) selected
String text = currentSuggestion.getReplacementString();
setText(text);
selectedOptionKey = currentSuggestion.key;
} else {
onNullSelected();
}
}
suggestionPopup.hide();
if (handledOnServer || !allowNewItems) {
lastNewItemString = null;
}
} | 3.68 |
hadoop_BlockReaderLocalMetrics_collectThreadLocalStates | /**
* Collects states maintained in {@link ThreadLocal}, if any.
*/
public void collectThreadLocalStates() {
shortCircuitReadRollingAverages.collectThreadLocalStates();
} | 3.68 |
hudi_JdbcSource_fetch | /**
* Decide to do a full RDBMS table scan or an incremental scan based on the lastCkptStr. If previous checkpoint
* value exists then we do an incremental scan with a PPD query or else we do a full scan. In certain cases where the
* incremental query fails, we fall back to a full scan.
*
* @param lastCkptStr Last checkpoint.
* @return The pair of {@link Dataset} and current checkpoint.
*/
private Pair<Option<Dataset<Row>>, String> fetch(Option<String> lastCkptStr, long sourceLimit) {
Dataset<Row> dataset;
if (lastCkptStr.isPresent() && !StringUtils.isNullOrEmpty(lastCkptStr.get())) {
dataset = incrementalFetch(lastCkptStr, sourceLimit);
} else {
LOG.info("No checkpoint references found. Doing a full rdbms table fetch");
dataset = fullFetch(sourceLimit);
}
dataset.persist(StorageLevel.fromString(
getStringWithAltKeys(props, JdbcSourceConfig.STORAGE_LEVEL, "MEMORY_AND_DISK_SER")));
boolean isIncremental = getBooleanWithAltKeys(props, JdbcSourceConfig.IS_INCREMENTAL);
Pair<Option<Dataset<Row>>, String> pair = Pair.of(Option.of(dataset), checkpoint(dataset, isIncremental, lastCkptStr));
dataset.unpersist();
return pair;
} | 3.68 |
flink_DefaultJobGraphStore_localCleanupAsync | /**
* Releases the locks on the specified {@link JobGraph}.
*
* <p>Releasing the locks allows that another instance can delete the job from the {@link
* JobGraphStore}.
*
* @param jobId specifying the job to release the locks for
* @param executor the executor being used for the asynchronous execution of the local cleanup.
* @return The cleanup result future.
*/
@Override
public CompletableFuture<Void> localCleanupAsync(JobID jobId, Executor executor) {
checkNotNull(jobId, "Job ID");
return runAsyncWithLockAssertRunning(
() -> {
LOG.debug("Releasing job graph {} from {}.", jobId, jobGraphStateHandleStore);
jobGraphStateHandleStore.release(jobGraphStoreUtil.jobIDToName(jobId));
addedJobGraphs.remove(jobId);
LOG.info("Released job graph {} from {}.", jobId, jobGraphStateHandleStore);
},
executor);
} | 3.68 |
framework_SliderTooltip_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 14019;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_newBuilder | /**
* Create a new {@link VertexThreadInfoTrackerBuilder}.
*
* @return Builder.
*/
public static VertexThreadInfoTrackerBuilder newBuilder(
GatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever,
ScheduledExecutorService executor,
Time restTimeout) {
return new VertexThreadInfoTrackerBuilder(
resourceManagerGatewayRetriever, executor, restTimeout);
} | 3.68 |
framework_VCalendar_setRangeMoveAllowed | /**
* Is moving a range allowed.
*
* @param rangeMoveAllowed
* Is it allowed
*/
public void setRangeMoveAllowed(boolean rangeMoveAllowed) {
this.rangeMoveAllowed = rangeMoveAllowed;
} | 3.68 |
morf_RenameIndex_getTableName | /**
* Gets the name of the table to change.
*
* @return the name of the table to change
*/
public String getTableName() {
return tableName;
} | 3.68 |
flink_DateTimeUtils_fromTemporalAccessor | /**
* This is similar to {@link LocalDateTime#from(TemporalAccessor)}, but it's less strict and
* introduces default values.
*/
private static LocalDateTime fromTemporalAccessor(TemporalAccessor accessor, int precision) {
// complement year with 1970
int year = accessor.isSupported(YEAR) ? accessor.get(YEAR) : 1970;
// complement month with 1
int month = accessor.isSupported(MONTH_OF_YEAR) ? accessor.get(MONTH_OF_YEAR) : 1;
// complement day with 1
int day = accessor.isSupported(DAY_OF_MONTH) ? accessor.get(DAY_OF_MONTH) : 1;
// complement hour with 0
int hour = accessor.isSupported(HOUR_OF_DAY) ? accessor.get(HOUR_OF_DAY) : 0;
// complement minute with 0
int minute = accessor.isSupported(MINUTE_OF_HOUR) ? accessor.get(MINUTE_OF_HOUR) : 0;
// complement second with 0
int second = accessor.isSupported(SECOND_OF_MINUTE) ? accessor.get(SECOND_OF_MINUTE) : 0;
// complement nano_of_second with 0
int nanoOfSecond = accessor.isSupported(NANO_OF_SECOND) ? accessor.get(NANO_OF_SECOND) : 0;
if (precision == 0) {
nanoOfSecond = 0;
} else if (precision != 9) {
nanoOfSecond = (int) floor(nanoOfSecond, powerX(10, 9 - precision));
}
return LocalDateTime.of(year, month, day, hour, minute, second, nanoOfSecond);
} | 3.68 |
morf_DatabaseMetaDataProvider_setAdditionalColumnMetadata | /**
* Sets additional column information.
*
* @param tableName Name of the table.
* @param column Column builder to set to.
* @param columnResultSet Result set to be read.
* @return Resulting column builder.
* @throws SQLException Upon errors.
*/
@SuppressWarnings("unused")
protected ColumnBuilder setAdditionalColumnMetadata(RealName tableName, ColumnBuilder column, ResultSet columnResultSet) throws SQLException {
return column;
} | 3.68 |
hudi_HoodieFunctionalIndexConfig_storeProperties | /**
* Write the properties to the given output stream and return the table checksum.
*
* @param props - properties to be written
* @param outputStream - output stream to which properties will be written
* @return return the table checksum
*/
private static String storeProperties(Properties props, FSDataOutputStream outputStream) throws IOException {
final String checksum;
if (isValidChecksum(props)) {
checksum = props.getProperty(INDEX_DEFINITION_CHECKSUM.key());
props.store(outputStream, "Updated at " + Instant.now());
} else {
Properties propsWithChecksum = getOrderedPropertiesWithTableChecksum(props);
propsWithChecksum.store(outputStream, "Properties saved on " + Instant.now());
checksum = propsWithChecksum.getProperty(INDEX_DEFINITION_CHECKSUM.key());
props.setProperty(INDEX_DEFINITION_CHECKSUM.key(), checksum);
}
return checksum;
} | 3.68 |
hudi_StreamerUtil_isWriteCommit | /**
* Returns whether the given instant is a data writing commit.
*
* @param tableType The table type
* @param instant The instant
* @param timeline The timeline
*/
public static boolean isWriteCommit(HoodieTableType tableType, HoodieInstant instant, HoodieTimeline timeline) {
return tableType == HoodieTableType.MERGE_ON_READ
? !instant.getAction().equals(HoodieTimeline.COMMIT_ACTION) // not a compaction
: !ClusteringUtil.isClusteringInstant(instant, timeline); // not a clustering
} | 3.68 |
hudi_UpsertPartitioner_averageBytesPerRecord | /**
* Obtains the average record size based on records written during previous commits. Used for estimating how many
* records pack into one file.
*/
protected static long averageBytesPerRecord(HoodieTimeline commitTimeline, HoodieWriteConfig hoodieWriteConfig) {
long avgSize = hoodieWriteConfig.getCopyOnWriteRecordSizeEstimate();
long fileSizeThreshold = (long) (hoodieWriteConfig.getRecordSizeEstimationThreshold() * hoodieWriteConfig.getParquetSmallFileLimit());
try {
if (!commitTimeline.empty()) {
// Go over the reverse ordered commits to get a more recent estimate of average record size.
Iterator<HoodieInstant> instants = commitTimeline.getReverseOrderedInstants().iterator();
while (instants.hasNext()) {
HoodieInstant instant = instants.next();
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata
.fromBytes(commitTimeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
long totalBytesWritten = commitMetadata.fetchTotalBytesWritten();
long totalRecordsWritten = commitMetadata.fetchTotalRecordsWritten();
if (totalBytesWritten > fileSizeThreshold && totalRecordsWritten > 0) {
avgSize = (long) Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten);
break;
}
}
}
} catch (Throwable t) {
// make this fail safe.
LOG.error("Error trying to compute average bytes/record ", t);
}
return avgSize;
} | 3.68 |
hadoop_CredentialProviderListFactory_initCredentialProvidersMap | /**
* Maps V1 credential providers to either their equivalent SDK V2 class or hadoop provider.
*/
private static Map<String, String> initCredentialProvidersMap() {
Map<String, String> v1v2CredentialProviderMap = new HashMap<>();
v1v2CredentialProviderMap.put(ANONYMOUS_CREDENTIALS_V1,
AnonymousAWSCredentialsProvider.NAME);
v1v2CredentialProviderMap.put(EC2_CONTAINER_CREDENTIALS_V1,
EC2_IAM_CREDENTIALS_V2);
v1v2CredentialProviderMap.put(EC2_IAM_CREDENTIALS_V1,
EC2_IAM_CREDENTIALS_V2);
v1v2CredentialProviderMap.put(ENVIRONMENT_CREDENTIALS_V1,
ENVIRONMENT_CREDENTIALS_V2);
v1v2CredentialProviderMap.put(PROFILE_CREDENTIALS_V1,
PROFILE_CREDENTIALS_V2);
return v1v2CredentialProviderMap;
} | 3.68 |
framework_AbstractSelect_containerPropertySetChange | /**
* Notifies this listener that the Container's contents have changed.
*
* @see Container.PropertySetChangeListener#containerPropertySetChange(Container.PropertySetChangeEvent)
*/
@Override
public void containerPropertySetChange(
Container.PropertySetChangeEvent event) {
firePropertySetChange();
} | 3.68 |
framework_IndexedContainer_clone | /**
* Supports cloning of the IndexedContainer cleanly.
*
* @throws CloneNotSupportedException
* if an object cannot be cloned.
*
* @deprecated As of 6.6. Cloning support might be removed from
* IndexedContainer in the future
*/
@Deprecated
@Override
public Object clone() throws CloneNotSupportedException {
// Creates the clone
final IndexedContainer nc = new IndexedContainer();
// Clone the shallow properties
nc.setAllItemIds(
getAllItemIds() != null ? new ListSet<Object>(getAllItemIds())
: null);
nc.setItemSetChangeListeners(getItemSetChangeListeners() != null
? new LinkedList<Container.ItemSetChangeListener>(
getItemSetChangeListeners())
: null);
nc.propertyIds = propertyIds != null
? new ArrayList<Object>(propertyIds)
: null;
nc.setPropertySetChangeListeners(getPropertySetChangeListeners() != null
? new LinkedList<Container.PropertySetChangeListener>(
getPropertySetChangeListeners())
: null);
nc.propertyValueChangeListeners = propertyValueChangeListeners != null
? new LinkedList<Property.ValueChangeListener>(
propertyValueChangeListeners)
: null;
nc.readOnlyProperties = readOnlyProperties != null
? new HashSet<Property<?>>(readOnlyProperties)
: null;
nc.singlePropertyValueChangeListeners = singlePropertyValueChangeListeners != null
? new Hashtable<Object, Map<Object, List<Property.ValueChangeListener>>>(
singlePropertyValueChangeListeners)
: null;
nc.types = types != null ? new Hashtable<Object, Class<?>>(types)
: null;
nc.setFilters(new HashSet<Filter>(getFilters()));
nc.setFilteredItemIds(getFilteredItemIds() == null ? null
: new ListSet<Object>(getFilteredItemIds()));
// Clone property-values
if (items == null) {
nc.items = null;
} else {
nc.items = new Hashtable<Object, Map<Object, Object>>();
for (final Iterator<?> i = items.keySet().iterator(); i
.hasNext();) {
final Object id = i.next();
final Hashtable<Object, Object> it = (Hashtable<Object, Object>) items
.get(id);
nc.items.put(id, (Map<Object, Object>) it.clone());
}
}
return nc;
} | 3.68 |
hadoop_RemoteParam_getParameterForContext | /**
* Determine the appropriate value for this parameter based on the location.
*
* @param context Context identifying the location.
* @return A parameter specific to this location.
*/
public Object getParameterForContext(RemoteLocationContext context) {
if (context == null) {
return null;
} else if (this.paramMap != null) {
return this.paramMap.get(context);
} else {
// Default case
return context.getDest();
}
} | 3.68 |
morf_IndexNameDecorator_isUnique | /**
* @see org.alfasoftware.morf.metadata.Index#isUnique()
*/
@Override
public boolean isUnique() {
return index.isUnique();
} | 3.68 |
pulsar_NamespacesBase_internalRemoveReplicatorDispatchRate | /**
* Base method for removeReplicatorDispatchRate v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalRemoveReplicatorDispatchRate(AsyncResponse asyncResponse) {
validateSuperUserAccessAsync()
.thenCompose(__ -> namespaceResources().setPoliciesAsync(namespaceName, policies -> {
String clusterName = pulsar().getConfiguration().getClusterName();
policies.replicatorDispatchRate.remove(clusterName);
return policies;
})).thenAccept(__ -> {
asyncResponse.resume(Response.noContent().build());
log.info("[{}] Successfully delete the replicatorDispatchRate for cluster on namespace {}",
clientAppId(), namespaceName);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to delete the replicatorDispatchRate for cluster on namespace {}",
clientAppId(), namespaceName, ex);
return null;
});
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWhereScript | /**
* Tests a select with a where clause.
*/
@Test
public void testSelectWhereScript() {
SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE))
.where(eq(new FieldReference(STRING_FIELD), "A0001"));
String value = varCharCast("'A0001'");
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (stringField = " + stringLiteralPrefix() +value+")";
assertEquals("Select scripts are not the same", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
pulsar_SchemaReader_setSchemaInfoProvider | /**
* Set schema info provider, this method support multi version reader.
*
* @param schemaInfoProvider the schema info provider
*/
default void setSchemaInfoProvider(SchemaInfoProvider schemaInfoProvider) {
} | 3.68 |
flink_DeduplicateFunctionHelper_checkInsertOnly | /** check message should be insert only. */
static void checkInsertOnly(RowData currentRow) {
Preconditions.checkArgument(currentRow.getRowKind() == RowKind.INSERT);
} | 3.68 |
framework_DragSourceExtensionConnector_sendDragEndEventToServer | /**
* Initiates a server RPC for the drag end event.
*
* @param dragEndEvent
* Client side dragend event.
* @param dropEffect
* Drop effect of the dragend event, extracted from {@code
* DataTransfer.dropEffect} parameter.
*/
protected void sendDragEndEventToServer(NativeEvent dragEndEvent,
DropEffect dropEffect) {
getRpcProxy(DragSourceRpc.class).dragEnd(dropEffect);
} | 3.68 |
pulsar_ManagedLedgerConfig_getMinimumBacklogCursorsForCaching | /**
* Minimum cursors with backlog after which broker is allowed to cache read entries to reuse them for other cursors'
* backlog reads. (Default = 0, broker will not cache backlog reads)
*
* @return
*/
public int getMinimumBacklogCursorsForCaching() {
return minimumBacklogCursorsForCaching;
} | 3.68 |
querydsl_ExtendedTypeFactory_createClassType | // TODO : simplify
private Type createClassType(DeclaredType declaredType, TypeElement typeElement, boolean deep) {
// other
String name = typeElement.getQualifiedName().toString();
if (name.startsWith("java.")) {
Iterator<? extends TypeMirror> i = declaredType.getTypeArguments().iterator();
if (isAssignable(declaredType, mapType)) {
return createMapType(i, deep);
} else if (isAssignable(declaredType, listType)) {
return createCollectionType(Types.LIST, i, deep);
} else if (isAssignable(declaredType, setType)) {
return createCollectionType(Types.SET, i, deep);
} else if (isAssignable(declaredType, collectionType)) {
return createCollectionType(Types.COLLECTION, i, deep);
}
}
TypeCategory typeCategory = TypeCategory.get(name);
if (typeCategory != TypeCategory.NUMERIC
&& isAssignable(typeElement.asType(), comparableType)
&& isSubType(typeElement.asType(), numberType)) {
typeCategory = TypeCategory.NUMERIC;
} else if (!typeCategory.isSubCategoryOf(TypeCategory.COMPARABLE)
&& isAssignable(typeElement.asType(), comparableType)) {
typeCategory = TypeCategory.COMPARABLE;
}
for (Class<? extends Annotation> entityAnn : entityAnnotations) {
if (isSimpleTypeEntity(typeElement, entityAnn)) {
typeCategory = TypeCategory.ENTITY;
}
}
List<? extends TypeMirror> arguments = declaredType.getTypeArguments();
// for intersection types etc
if (name.equals("")) {
TypeMirror type = objectType;
if (typeCategory == TypeCategory.COMPARABLE) {
type = comparableType;
}
// find most specific type of superTypes which is a subtype of type
List<? extends TypeMirror> superTypes = env.getTypeUtils().directSupertypes(declaredType);
for (TypeMirror superType : superTypes) {
if (env.getTypeUtils().isSubtype(superType, type)) {
type = superType;
}
}
typeElement = (TypeElement) env.getTypeUtils().asElement(type);
if (type instanceof DeclaredType) {
arguments = ((DeclaredType) type).getTypeArguments();
}
}
Type type = createType(typeElement, typeCategory, arguments, deep);
TypeMirror superType = typeElement.getSuperclass();
TypeElement superTypeElement = null;
if (superType instanceof DeclaredType) {
superTypeElement = (TypeElement) ((DeclaredType) superType).asElement();
}
// entity type
for (Class<? extends Annotation> entityAnn : entityAnnotations) {
if (typeElement.getAnnotation(entityAnn) != null ||
(superTypeElement != null && superTypeElement.getAnnotation(entityAnn) != null)) {
EntityType entityType = new EntityType(type, variableNameFunction);
typeMappings.register(entityType, queryTypeFactory.create(entityType));
return entityType;
}
}
return type;
} | 3.68 |
hbase_CheckAndMutate_ifNotExists | /**
* Check for lack of column
* @param family family to check
* @param qualifier qualifier to check
* @return the CheckAndMutate object
*/
public Builder ifNotExists(byte[] family, byte[] qualifier) {
return ifEquals(family, qualifier, null);
} | 3.68 |
flink_CompactingHashTable_getPartitioningFanOutNoEstimates | /**
* Gets the number of partitions to be used for an initial hash-table, when no estimates are
* available.
*
* <p>The current logic makes sure that there are always between 10 and 32 partitions, and close
* to 0.1 of the number of buffers.
*
* @param numBuffers The number of buffers available.
* @return The number of partitions to use.
*/
private static int getPartitioningFanOutNoEstimates(int numBuffers) {
return Math.max(10, Math.min(numBuffers / 10, MAX_NUM_PARTITIONS));
} | 3.68 |
flink_KubernetesUtils_getDeploymentName | /** Generate name of the Deployment. */
public static String getDeploymentName(String clusterId) {
return clusterId;
} | 3.68 |
framework_VaadinSession_writeObject | /**
* Override default serialization logic to avoid
* ConcurrentModificationException if the contents are modified while
* serialization is reading them.
*/
private void writeObject(ObjectOutputStream out) throws IOException {
Lock lock = this.lock;
if (lock != null) {
lock.lock();
}
try {
out.defaultWriteObject();
} finally {
if (lock != null) {
lock.unlock();
}
}
} | 3.68 |
zxing_DetectionResultRowIndicatorColumn_adjustCompleteIndicatorColumnRowNumbers | // TODO implement properly
// TODO maybe we should add missing codewords to store the correct row number to make
// finding row numbers for other columns easier
// use row height count to make detection of invalid row numbers more reliable
void adjustCompleteIndicatorColumnRowNumbers(BarcodeMetadata barcodeMetadata) {
Codeword[] codewords = getCodewords();
setRowNumbers();
removeIncorrectCodewords(codewords, barcodeMetadata);
BoundingBox boundingBox = getBoundingBox();
ResultPoint top = isLeft ? boundingBox.getTopLeft() : boundingBox.getTopRight();
ResultPoint bottom = isLeft ? boundingBox.getBottomLeft() : boundingBox.getBottomRight();
int firstRow = imageRowToCodewordIndex((int) top.getY());
int lastRow = imageRowToCodewordIndex((int) bottom.getY());
// We need to be careful using the average row height. Barcode could be skewed so that we have smaller and
// taller rows
//float averageRowHeight = (lastRow - firstRow) / (float) barcodeMetadata.getRowCount();
int barcodeRow = -1;
int maxRowHeight = 1;
int currentRowHeight = 0;
for (int codewordsRow = firstRow; codewordsRow < lastRow; codewordsRow++) {
if (codewords[codewordsRow] == null) {
continue;
}
Codeword codeword = codewords[codewordsRow];
int rowDifference = codeword.getRowNumber() - barcodeRow;
// TODO improve handling with case where first row indicator doesn't start with 0
if (rowDifference == 0) {
currentRowHeight++;
} else if (rowDifference == 1) {
maxRowHeight = Math.max(maxRowHeight, currentRowHeight);
currentRowHeight = 1;
barcodeRow = codeword.getRowNumber();
} else if (rowDifference < 0 ||
codeword.getRowNumber() >= barcodeMetadata.getRowCount() ||
rowDifference > codewordsRow) {
codewords[codewordsRow] = null;
} else {
int checkedRows;
if (maxRowHeight > 2) {
checkedRows = (maxRowHeight - 2) * rowDifference;
} else {
checkedRows = rowDifference;
}
boolean closePreviousCodewordFound = checkedRows >= codewordsRow;
for (int i = 1; i <= checkedRows && !closePreviousCodewordFound; i++) {
// there must be (height * rowDifference) number of codewords missing. For now we assume height = 1.
// This should hopefully get rid of most problems already.
closePreviousCodewordFound = codewords[codewordsRow - i] != null;
}
if (closePreviousCodewordFound) {
codewords[codewordsRow] = null;
} else {
barcodeRow = codeword.getRowNumber();
currentRowHeight = 1;
}
}
}
//return (int) (averageRowHeight + 0.5);
} | 3.68 |
hadoop_ReconfigurationException_getNewValue | /**
* Get value to which property was supposed to be changed.
* @return new value.
*/
public String getNewValue() {
return newVal;
} | 3.68 |
hadoop_VolumeFailureSummary_getLastVolumeFailureDate | /**
* Returns the date/time of the last volume failure in milliseconds since
* epoch.
*
* @return date/time of last volume failure in milliseconds since epoch
*/
public long getLastVolumeFailureDate() {
return this.lastVolumeFailureDate;
} | 3.68 |
flink_Tuple2_toString | /**
* Creates a string representation of the tuple in the form (f0, f1), where the individual
* fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ")";
} | 3.68 |
morf_DatabaseMetaDataProvider_tables | /**
* @see org.alfasoftware.morf.metadata.Schema#tables()
*/
@Override
public Collection<Table> tables() {
return tableNames.get().values().stream().map(RealName::getRealName).map(this::getTable).collect(Collectors.toList());
} | 3.68 |
framework_Calendar_setLastVisibleDayOfWeek | /**
* <p>
* This method restricts the weekdays that are shown. This affects both the
* monthly and the weekly view. The general contract is that <b>firstDay <
* lastDay</b>.
* </p>
*
* <p>
* Note that this only affects the rendering process. Events are still
* requested by the dates set by {@link #setStartDate(Date)} and
* {@link #setEndDate(Date)}.
* </p>
*
* @param lastDay
* the last day of the week to show, between 1 and 7
*/
public void setLastVisibleDayOfWeek(int lastDay) {
if (this.lastDay != lastDay && lastDay >= 1 && lastDay <= 7
&& getFirstVisibleDayOfWeek() <= lastDay) {
this.lastDay = lastDay;
getState().lastVisibleDayOfWeek = lastDay;
}
} | 3.68 |
hbase_ProcedureStoreTracker_mergeNodes | /**
* Merges {@code leftNode} & {@code rightNode} and updates the map.
*/
private BitSetNode mergeNodes(BitSetNode leftNode, BitSetNode rightNode) {
assert leftNode.getStart() < rightNode.getStart();
leftNode.merge(rightNode);
map.remove(rightNode.getStart());
return leftNode;
} | 3.68 |
flink_RestServerEndpoint_getRestBaseUrl | /**
* Returns the base URL of the REST server endpoint.
*
* @return REST base URL of this endpoint
*/
public String getRestBaseUrl() {
synchronized (lock) {
assertRestServerHasBeenStarted();
return restBaseUrl;
}
} | 3.68 |
hadoop_FutureIO_awaitFuture | /**
* Given a future, evaluate it.
* <p>
* Any exception generated in the future is
* extracted and rethrown.
* </p>
* @param future future to evaluate
* @param timeout timeout to wait
* @param unit time unit.
* @param <T> type of the result.
* @return the result, if all went well.
* @throws InterruptedIOException future was interrupted
* @throws IOException if something went wrong
* @throws RuntimeException any nested RTE thrown
* @throws TimeoutException the future timed out.
*/
public static <T> T awaitFuture(final Future<T> future,
final long timeout,
final TimeUnit unit)
throws InterruptedIOException, IOException, RuntimeException,
TimeoutException {
try {
return future.get(timeout, unit);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException(e.toString())
.initCause(e);
} catch (ExecutionException e) {
return raiseInnerCause(e);
}
} | 3.68 |
framework_Table_setCurrentPageFirstItemId | /**
* Setter for property currentPageFirstItemId.
*
* @param currentPageFirstItemId
* the New value of property currentPageFirstItemId.
*/
public void setCurrentPageFirstItemId(Object currentPageFirstItemId) {
// Gets the corresponding index
int index = -1;
if (items instanceof Container.Indexed) {
index = indexOfId(currentPageFirstItemId);
} else {
// If the table item container does not have an index, we have to
// calculate the index by hand
Object id = firstItemId();
while (id != null && !id.equals(currentPageFirstItemId)) {
index++;
id = nextItemId(id);
}
if (id == null) {
index = -1;
}
}
// If the search for item index was successful
if (index >= 0) {
/*
* The table cannot display an item as the first one in the container
* if there are not enough items following it to fill the whole
* table (pagelength).
*/
int maxIndex = size() - pageLength;
if (maxIndex < 0) {
maxIndex = 0;
}
if (index > maxIndex) {
// Note that we pass index, not maxIndex, letting
// setCurrentPageFirstItemIndex handle the situation.
setCurrentPageFirstItemIndex(index);
return;
}
this.currentPageFirstItemId = currentPageFirstItemId;
currentPageFirstItemIndex = index;
}
// Assures the visual refresh
refreshRowCache();
} | 3.68 |
dubbo_URLParam_getParameter | /**
* get value of specified key in URLParam
*
* @param key specified key
* @return value, null if key is absent
*/
public String getParameter(String key) {
int keyIndex = DynamicParamTable.getKeyIndex(enableCompressed, key);
if (keyIndex < 0) {
return EXTRA_PARAMS.get(key);
}
if (KEY.get(keyIndex)) {
String value;
int offset = keyIndexToOffset(keyIndex);
value = DynamicParamTable.getValue(keyIndex, offset);
return value;
// if (StringUtils.isEmpty(value)) {
// // Forward compatible, make sure key dynamic increment can work.
// // In that case, some values which are proceed before increment will set in EXTRA_PARAMS.
// return EXTRA_PARAMS.get(key);
// } else {
// return value;
// }
}
return null;
} | 3.68 |
hbase_MobUtils_createMobRefCell | /**
* Creates a mob reference KeyValue. The value of the mob reference KeyValue is mobCellValueSize +
* mobFileName.
* @param cell The original Cell.
* @param fileName The mob file name where the mob reference KeyValue is written.
* @param tableNameTag The tag of the current table name. It's very important in cloning the
* snapshot.
* @return The mob reference KeyValue.
*/
public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag) {
// Append the tags to the KeyValue.
// The key is same, the value is the filename of the mob file
List<Tag> tags = new ArrayList<>();
// Add the ref tag as the 1st one.
tags.add(MobConstants.MOB_REF_TAG);
// Add the tag of the source table name, this table is where this mob file is flushed
// from.
// It's very useful in cloning the snapshot. When reading from the cloning table, we need to
// find the original mob files by this table name. For details please see cloning
// snapshot for mob files.
tags.add(tableNameTag);
return createMobRefCell(cell, fileName, TagUtil.fromList(tags));
} | 3.68 |
morf_SqlDialect_addTableFromStatements | /**
* Generates the SQL to create a table and insert the data specified in the {@link SelectStatement}.
*
* @param table The table to create.
* @param selectStatement The {@link SelectStatement}
* @return A collection of SQL statements
*/
public Collection<String> addTableFromStatements(Table table, SelectStatement selectStatement) {
return ImmutableList.<String>builder()
.addAll(
tableDeploymentStatements(table)
)
.addAll(convertStatementToSQL(
SqlUtils.insert().into(SqlUtils.tableRef(table.getName())).from(selectStatement))
)
.build();
} | 3.68 |
hbase_ImmutableBytesWritable_getLength | /** Returns the number of valid bytes in the buffer */
public int getLength() {
if (this.bytes == null) {
throw new IllegalStateException(
    "Uninitialized. Null constructor called without accompanying readFields invocation");
}
return this.length;
} | 3.68 |
hbase_ProcedureWALPrettyPrinter_processFile | /**
* Reads a log file and outputs its contents.
* @param conf HBase configuration relevant to this log file
* @param p path of the log file to be read
* @throws IOException IOException
*/
public void processFile(final Configuration conf, final Path p) throws IOException {
FileSystem fs = p.getFileSystem(conf);
if (!fs.exists(p)) {
System.err.println("ERROR, file doesnt exist: " + p);
return;
}
if (!fs.isFile(p)) {
System.err.println(p + " is not a file");
return;
}
FileStatus logFile = fs.getFileStatus(p);
if (logFile.getLen() == 0) {
out.println("Zero length file: " + p);
return;
}
out.println("Opening procedure state-log: " + p);
ProcedureWALFile log = new ProcedureWALFile(fs, logFile);
processProcedureWALFile(log);
} | 3.68 |
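A hedged driver sketch; the log path is a placeholder and the no-arg constructor (printing to System.out) is an assumption about this tool:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALPrettyPrinter;

public class DumpProcedureWal {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // hypothetical master procedure state-log path
        Path log = new Path("hdfs:///hbase/MasterProcWALs/pv2-00000000000000000001.log");
        new ProcedureWALPrettyPrinter().processFile(conf, log);
    }
}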
hudi_LogReaderUtils_decodeRecordPositionsHeader | /**
* Decodes the {@link HeaderMetadataType#RECORD_POSITIONS} block header into record positions.
*
* @param content A string of Base64-encoded bytes ({@link java.util.Base64} in Java
* implementation) generated from serializing {@link Roaring64NavigableMap}
* bitmap using the portable format.
* @return A {@link Roaring64NavigableMap} bitmap containing the record positions in long type.
* @throws IOException upon I/O error.
*/
public static Roaring64NavigableMap decodeRecordPositionsHeader(String content) throws IOException {
Roaring64NavigableMap positionBitmap = new Roaring64NavigableMap();
ByteArrayInputStream bais = new ByteArrayInputStream(Base64CodecUtil.decode(content));
DataInputStream dis = new DataInputStream(bais);
positionBitmap.deserializePortable(dis);
return positionBitmap;
} | 3.68 |
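A round-trip sketch of the encoding this method expects: positions go into a Roaring64NavigableMap, are serialized in the portable format, and Base64-encoded. Using java.util.Base64 here assumes that is what Hudi's Base64CodecUtil wraps:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.util.Base64;
import org.roaringbitmap.longlong.Roaring64NavigableMap;

public class RecordPositionsRoundTrip {
    public static void main(String[] args) throws Exception {
        Roaring64NavigableMap positions = new Roaring64NavigableMap();
        positions.addLong(3L);
        positions.addLong(42L);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        positions.serializePortable(new DataOutputStream(baos));
        String header = Base64.getEncoder().encodeToString(baos.toByteArray());
        // LogReaderUtils.decodeRecordPositionsHeader(header) should rebuild the same bitmap
        System.out.println(header);
    }
}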
hadoop_CopyOutputFormat_getWorkingDirectory | /**
* Getter for the working directory.
* @param job The Job from whose configuration the working-directory is to
* be retrieved.
* @return The working-directory Path.
*/
public static Path getWorkingDirectory(Job job) {
return getWorkingDirectory(job.getConfiguration());
} | 3.68 |
hadoop_ReadBufferManager_getBufferFromCompletedQueue | /**
* Returns a buffer from the completed queue (failed or successful) that
* covers the requested offset for the given stream.
* @param stream the stream the buffer must belong to
* @param requestedOffset the offset that must fall within the buffer's range
* @return the matching completed buffer, or null if none covers the offset
*/
private ReadBuffer getBufferFromCompletedQueue(final AbfsInputStream stream, final long requestedOffset) {
for (ReadBuffer buffer : completedReadList) {
// The buffer is returned if requestedOffset is at or after the buffer's
// offset and before the end of either its actual or its requested length
if ((buffer.getStream() == stream)
&& (requestedOffset >= buffer.getOffset())
&& ((requestedOffset < buffer.getOffset() + buffer.getLength())
|| (requestedOffset < buffer.getOffset() + buffer.getRequestedLength()))) {
return buffer;
}
}
return null;
} | 3.68 |
flink_SpillingThread_mergeChannelList | /**
* Merges the given sorted runs to a smaller number of sorted runs.
*
* @param channelIDs The IDs of the sorted runs that need to be merged.
* @param allReadBuffers
* @param writeBuffers The buffers to be used by the writers.
* @return A list of the IDs of the merged channels.
* @throws IOException Thrown, if the readers or writers encountered an I/O problem.
*/
private List<ChannelWithBlockCount> mergeChannelList(
final List<ChannelWithBlockCount> channelIDs,
final List<MemorySegment> allReadBuffers,
final List<MemorySegment> writeBuffers)
throws IOException {
// A channel list of length maxFanIn^i can be merged to maxFanIn files in i-1 rounds,
// where every merge is a full merge with maxFanIn input channels. A partial round
// includes merges with fewer than maxFanIn inputs. It is most efficient to perform
// the partial round first.
final double scale = Math.ceil(Math.log(channelIDs.size()) / Math.log(this.maxFanIn)) - 1;
final int numStart = channelIDs.size();
final int numEnd = (int) Math.pow(this.maxFanIn, scale);
final int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (this.maxFanIn - 1));
final int numNotMerged = numEnd - numMerges;
final int numToMerge = numStart - numNotMerged;
// unmerged channel IDs are copied directly to the result list
final List<ChannelWithBlockCount> mergedChannelIDs = new ArrayList<>(numEnd);
mergedChannelIDs.addAll(channelIDs.subList(0, numNotMerged));
final int channelsToMergePerStep = (int) Math.ceil(numToMerge / (double) numMerges);
// allocate the memory for the merging step
final List<List<MemorySegment>> readBuffers = new ArrayList<>(channelsToMergePerStep);
getSegmentsForReaders(readBuffers, allReadBuffers, channelsToMergePerStep);
final List<ChannelWithBlockCount> channelsToMergeThisStep =
new ArrayList<>(channelsToMergePerStep);
int channelNum = numNotMerged;
while (isRunning() && channelNum < channelIDs.size()) {
channelsToMergeThisStep.clear();
for (int i = 0;
i < channelsToMergePerStep && channelNum < channelIDs.size();
i++, channelNum++) {
channelsToMergeThisStep.add(channelIDs.get(channelNum));
}
mergedChannelIDs.add(mergeChannels(channelsToMergeThisStep, readBuffers, writeBuffers));
}
return mergedChannelIDs;
} | 3.68 |
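A worked example of the sizing arithmetic above, assuming 20 sorted runs and maxFanIn = 4; it only replays the formulas, no I/O is performed:

// Replaying the partial-round math: 20 runs, fan-in 4.
int numStart = 20, maxFanIn = 4;
double scale = Math.ceil(Math.log(numStart) / Math.log(maxFanIn)) - 1;          // 2
int numEnd = (int) Math.pow(maxFanIn, scale);                                   // 16
int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (maxFanIn - 1)); // 2
int numNotMerged = numEnd - numMerges;                                          // 14
int numToMerge = numStart - numNotMerged;                                       // 6
int perStep = (int) Math.ceil(numToMerge / (double) numMerges);                 // 3
// => 14 runs pass through untouched, 6 runs are merged in 2 merges of 3,
//    leaving exactly 16 runs for the following full-fan-in rounds.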
flink_AbstractColumnReader_readToVector | /** Reads `total` values from this columnReader into column. */
@Override
public final void readToVector(int readNumber, VECTOR vector) throws IOException {
int rowId = 0;
WritableIntVector dictionaryIds = null;
if (dictionary != null) {
dictionaryIds = vector.reserveDictionaryIds(readNumber);
}
while (readNumber > 0) {
// Compute the number of values we want to read in this page.
int leftInPage = (int) (endOfPageValueCount - valuesRead);
if (leftInPage == 0) {
DataPage page = pageReader.readPage();
if (page instanceof DataPageV1) {
readPageV1((DataPageV1) page);
} else if (page instanceof DataPageV2) {
readPageV2((DataPageV2) page);
} else {
throw new RuntimeException("Unsupported page type: " + page.getClass());
}
leftInPage = (int) (endOfPageValueCount - valuesRead);
}
int num = Math.min(readNumber, leftInPage);
if (isCurrentPageDictionaryEncoded) {
// Read and decode dictionary ids.
runLenDecoder.readDictionaryIds(
num, dictionaryIds, vector, rowId, maxDefLevel, this.dictionaryIdsDecoder);
if (vector.hasDictionary() || (rowId == 0 && supportLazyDecode())) {
// Column vector supports lazy decoding of dictionary values, so just set the
// dictionary. We can't do this if rowId != 0 AND the column doesn't have a
// dictionary (i.e. some non-dictionary encoded values have already been added).
vector.setDictionary(new ParquetDictionary(dictionary));
} else {
readBatchFromDictionaryIds(rowId, num, vector, dictionaryIds);
}
} else {
if (vector.hasDictionary() && rowId != 0) {
// This batch already has dictionary-encoded values but this new page is not
// dictionary-encoded. The batch does not support a mix of the two, so we
// decode the dictionary values added so far.
readBatchFromDictionaryIds(0, rowId, vector, vector.getDictionaryIds());
}
vector.setDictionary(null);
readBatch(rowId, num, vector);
}
valuesRead += num;
rowId += num;
readNumber -= num;
}
} | 3.68 |
framework_WindowElement_isMaximized | /**
* Checks whether this window is currently maximized.
*
* @return {@code true} if the window is maximized, {@code false} otherwise
*/
public boolean isMaximized() {
return isElementPresent(By.className(RESTORE_BOX_CLASS));
} | 3.68 |
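A short TestBench-style usage sketch; the $(...) element query and the maximize() helper are assumptions about the surrounding test API:

WindowElement window = $(WindowElement.class).first();
if (!window.isMaximized()) {
    window.maximize(); // assumed helper that clicks the maximize box
}
// RESTORE_BOX_CLASS is only rendered while the window is maximized
assert window.isMaximized();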
hbase_HBaseServerBase_setupClusterConnection | /**
* Setup our cluster connection if not already initialized.
*/
protected final synchronized void setupClusterConnection() throws IOException {
if (asyncClusterConnection == null) {
InetSocketAddress localAddress =
new InetSocketAddress(rpcServices.getSocketAddress().getAddress(), 0);
User user = userProvider.getCurrent();
asyncClusterConnection =
ClusterConnectionFactory.createAsyncClusterConnection(this, conf, localAddress, user);
}
} | 3.68 |
flink_ProjectOperator_projectTuple1 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0> ProjectOperator<T, Tuple1<T0>> projectTuple1() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple1<T0>> tType = new TupleTypeInfo<Tuple1<T0>>(fTypes);
return new ProjectOperator<T, Tuple1<T0>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
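A usage sketch through the public project(...) entry point on DataSet, which delegates to the typed projectTupleX variants; the data and field index are hypothetical:

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple3<Integer, String, Double>> orders = env.fromElements(
    Tuple3.of(1, "books", 12.5),
    Tuple3.of(2, "games", 30.0));
// keep only field 1; the result type is inferred as Tuple1<String>
DataSet<Tuple1<String>> categories = orders.project(1);
categories.print();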
framework_DesignContext_shouldWriteDefaultValues | /**
* Determines whether default attribute values should be written by the
* {@code DesignAttributeHandler#writeAttribute(String, Attributes, Object, Object, Class, DesignContext)}
* method. Default is {@code false}.
*
* @since 8.0
* @return {@code true} if default values of attributes should be written,
* otherwise {@code false}
*/
public boolean shouldWriteDefaultValues() {
return shouldWriteDefaultValues;
} | 3.68 |
hadoop_ManifestCommitter_getWorkPath | /**
* Work path of the current task attempt.
* This is null if the task does not have one.
* @return a path.
*/
@Override
public Path getWorkPath() {
return getTaskAttemptDir();
} | 3.68 |
framework_VAbstractDropHandler_drop | /**
* The default implementation visits the server if the {@link AcceptCriterion}
* cannot be verified on the client, or if the {@link AcceptCriterion} is met
* on the client.
*/
@Override
public boolean drop(VDragEvent drag) {
if (acceptCriteria.needsServerSideCheck(drag, criterioUIDL)) {
return true;
} else {
validated = false;
acceptCriteria.accept(drag, criterioUIDL,
event -> validated = true);
return validated;
}
} | 3.68 |
flink_TimestampedValue_hasTimestamp | /**
* Checks whether this record has a timestamp.
*
* @return True if the record has a timestamp, false if not.
*/
public boolean hasTimestamp() {
return hasTimestamp;
} | 3.68 |
shardingsphere-elasticjob_AbstractDistributeOnceElasticJobListener_notifyWaitingTaskComplete | /**
* Notify waiting task complete.
*/
public void notifyWaitingTaskComplete() {
synchronized (completedWait) {
completedWait.notifyAll();
}
} | 3.68 |
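For context, a minimal sketch of the waiting side that this notification pairs with; the timeout value and the guard method are hypothetical, only the monitor pattern is the point:

// Waiting side of the same completedWait monitor (sketch).
private void awaitOtherShardsCompleted(long timeoutMillis) {
    synchronized (completedWait) {
        while (!isAllShardsCompleted()) {          // hypothetical guard
            try {
                completedWait.wait(timeoutMillis); // woken by notifyWaitingTaskComplete()
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}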
framework_VAbstractTextualDate_checkGroupFocus | /**
* Checks if the group focus has changed, and notifies the server if needed.
*
* @param textFocus
* the focus of the {@link #text}
* @since 8.3
*/
protected void checkGroupFocus(boolean textFocus) {
boolean newGroupFocus = textFocus | hasChildFocus();
if (getClient() != null
&& connector.hasEventListener(
textFocus ? EventId.FOCUS : EventId.BLUR)
&& groupFocus != newGroupFocus) {
if (newGroupFocus) {
rpc.focus();
} else {
rpc.blur();
}
sendBufferedValues();
groupFocus = newGroupFocus;
}
} | 3.68 |
flink_BlobInputStream_throwEOFException | /**
* Convenience method to throw an {@link EOFException}.
*
* @throws EOFException thrown to indicate the underlying input stream did not provide as much
* data as expected
*/
private void throwEOFException() throws EOFException {
throw new EOFException(
String.format(
"Expected to read %d more bytes from stream",
this.bytesToReceive - this.bytesReceived));
} | 3.68 |
flink_GateNotificationHelper_notifyPriority | /** Must be called under lock to ensure integrity of priorityAvailabilityHelper. */
public void notifyPriority() {
toNotifyPriority = inputGate.priorityAvailabilityHelper.getUnavailableToResetAvailable();
} | 3.68 |
hadoop_AzureBlobFileSystem_breakLease | /**
* Break the current lease on an ABFS file if it exists. A lease that is broken cannot be
* renewed. A new lease may be obtained on the file immediately.
*
* @param f file name
* @throws IOException on any exception while breaking the lease
*/
public void breakLease(final Path f) throws IOException {
LOG.debug("AzureBlobFileSystem.breakLease path: {}", f);
Path qualifiedPath = makeQualified(f);
try (DurationInfo ignored = new DurationInfo(LOG, false, "Break lease for %s",
qualifiedPath)) {
TracingContext tracingContext = new TracingContext(clientCorrelationId,
fileSystemId, FSOperationType.BREAK_LEASE, tracingHeaderFormat,
listener);
abfsStore.breakLease(qualifiedPath, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(f, ex);
}
} | 3.68 |
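A hedged usage sketch; the abfs URI and file path are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;

public class BreakLeaseDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI uri = URI.create("abfs://container@account.dfs.core.windows.net/");
        try (FileSystem fs = FileSystem.get(uri, conf)) {
            // releases a stuck lease so a new writer can acquire one immediately
            ((AzureBlobFileSystem) fs).breakLease(new Path("/data/events/part-0001.json"));
        }
    }
}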
hadoop_ZStandardDecompressor_reset | /**
* Resets everything including the input buffers (user and direct).
*/
@Override
public void reset() {
checkStream();
init(stream);
remaining = 0;
finished = false;
compressedDirectBufOff = 0;
bytesInCompressedBuffer = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = 0;
userBufferBytesToConsume = 0;
} | 3.68 |