| name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_NodePlan_setPort | /**
* Sets the DataNode RPC Port.
*
* @param port - int
*/
public void setPort(int port) {
this.port = port;
} | 3.68 |
zxing_TelResultHandler_getDisplayContents | // Overridden so we can take advantage of Android's phone number hyphenation routines.
@Override
public CharSequence getDisplayContents() {
String contents = getResult().getDisplayResult();
contents = contents.replace("\r", "");
return formatPhone(contents);
} | 3.68 |
hadoop_SuccessData_getJobId | /** @return Job ID, if known. */
public String getJobId() {
return jobId;
} | 3.68 |
hadoop_TwoColumnLayout_render | /*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlPage#render(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void render(Page.HTML<__> html) {
preHead(html);
html.
title($(TITLE)).
link(root_url("static", "yarn.css")).
style("#layout { height: 100%; }",
"#layout thead td { height: 3em; }",
"#layout #navcell { width: 11em; padding: 0 1em; }",
"#layout td.content { padding-top: 0 }",
"#layout tbody { vertical-align: top; }",
"#layout tfoot td { height: 4em; }").
__(JQueryUI.class);
postHead(html);
JQueryUI.jsnotice(html);
html.
table("#layout.ui-widget-content").
thead().
tr().
td().$colspan(2).
__(header()).__().__().__().
tfoot().
tr().
td().$colspan(2).
__(footer()).__().__().__().
tbody().
tr().
td().$id("navcell").
__(nav()).__().
td().$class("content").
__(content()).__().__().__().__().__();
} | 3.68 |
hudi_ParquetUtils_readAvroRecords | /**
* NOTE: This literally reads the entire file contents, thus should be used with caution.
*/
@Override
public List<GenericRecord> readAvroRecords(Configuration configuration, Path filePath) {
List<GenericRecord> records = new ArrayList<>();
try (ParquetReader reader = AvroParquetReader.builder(filePath).withConf(configuration).build()) {
Object obj = reader.read();
while (obj != null) {
if (obj instanceof GenericRecord) {
records.add(((GenericRecord) obj));
}
obj = reader.read();
}
} catch (IOException e) {
throw new HoodieIOException("Failed to read avro records from Parquet " + filePath, e);
}
return records;
} | 3.68 |
framework_Overlay_setHeight | /**
* Sets the pixel value for height css property.
*
* @param height
* value to set
*/
public void setHeight(int height) {
if (height < 0) {
height = 0;
}
this.height = height;
} | 3.68 |
hadoop_AbstractS3ACommitter_createSuccessData | /**
* Create the success data structure from a job context.
* @param context job context.
* @param filenames short list of filenames; nullable
* @param ioStatistics IOStatistics snapshot
* @param destConf config of the dest fs, can be null
* @return the structure
*
*/
private SuccessData createSuccessData(final JobContext context,
final List<String> filenames,
final IOStatisticsSnapshot ioStatistics,
final Configuration destConf) {
// create a success data structure
SuccessData successData = new SuccessData();
successData.setCommitter(getName());
successData.setJobId(uuid);
successData.setJobIdSource(uuidSource.getText());
successData.setDescription(getRole());
successData.setHostname(NetUtils.getLocalHostname());
Date now = new Date();
successData.setTimestamp(now.getTime());
successData.setDate(now.toString());
if (filenames != null) {
successData.setFilenames(filenames);
}
successData.getIOStatistics().aggregate(ioStatistics);
// attach some config options as diagnostics to assist
// in debugging performance issues.
// commit thread pool size
successData.addDiagnostic(FS_S3A_COMMITTER_THREADS,
Integer.toString(getJobCommitThreadCount(context)));
// and filesystem http connection and thread pool sizes
if (destConf != null) {
successData.addDiagnostic(MAXIMUM_CONNECTIONS,
destConf.get(MAXIMUM_CONNECTIONS,
Integer.toString(DEFAULT_MAXIMUM_CONNECTIONS)));
successData.addDiagnostic(MAX_TOTAL_TASKS,
destConf.get(MAX_TOTAL_TASKS,
Integer.toString(DEFAULT_MAX_TOTAL_TASKS)));
}
return successData;
}
/**
* Base job setup (optionally) deletes the success marker and
* always creates the destination directory.
 * When objects are committed, that dest dir marker will inevitably
 * be deleted; creating it now ensures there is something at the destination
 * while the job is in progress, and that it is still there even if
 * nothing else gets created.
* <p>
* The option {@link InternalCommitterConstants#FS_S3A_COMMITTER_UUID}
* is set to the job UUID; if generated locally
* {@link InternalCommitterConstants#SPARK_WRITE_UUID} is also patched.
* The field {@link #jobSetup} | 3.68 |
querydsl_LuceneSerializer_createBooleanClause | /**
* If the query is a BooleanQuery and it contains a single Occur.MUST_NOT
* clause it will be returned as is. Otherwise it will be wrapped in a
* BooleanClause with the given Occur.
*/
private BooleanClause createBooleanClause(Query query, Occur occur) {
if (query instanceof BooleanQuery) {
BooleanClause[] clauses = ((BooleanQuery) query).getClauses();
if (clauses.length == 1 && clauses[0].getOccur().equals(Occur.MUST_NOT)) {
return clauses[0];
}
}
return new BooleanClause(query, occur);
} | 3.68 |
pulsar_ManagedLedgerConfig_setAckQuorumSize | /**
* @param ackQuorumSize
* the ackQuorumSize to set
*/
public ManagedLedgerConfig setAckQuorumSize(int ackQuorumSize) {
this.ackQuorumSize = ackQuorumSize;
return this;
} | 3.68 |
hbase_CompactingMemStore_tryFlushInMemoryAndCompactingAsync | /**
* Try to flush the currActive in memory and submit the background
* {@link InMemoryCompactionRunnable} to
* {@link RegionServicesForStores#getInMemoryCompactionPool()}. Just one thread can do the actual
* flushing in memory.
   * @param currActive current Active Segment to be flushed in memory.
*/
private void tryFlushInMemoryAndCompactingAsync(MutableSegment currActive) {
if (currActive.setInMemoryFlushed()) {
flushInMemory(currActive);
if (setInMemoryCompactionFlag()) {
// The thread is dispatched to do in-memory compaction in the background
InMemoryCompactionRunnable runnable = new InMemoryCompactionRunnable();
if (LOG.isTraceEnabled()) {
LOG.trace(
"Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());
}
getPool().execute(runnable);
}
}
} | 3.68 |
hadoop_ExportedBlockKeys_readFields | /**
*/
@Override
public void readFields(DataInput in) throws IOException {
isBlockTokenEnabled = in.readBoolean();
keyUpdateInterval = in.readLong();
tokenLifetime = in.readLong();
currentKey.readFields(in);
this.allKeys = new BlockKey[in.readInt()];
for (int i = 0; i < allKeys.length; i++) {
allKeys[i] = new BlockKey();
allKeys[i].readFields(in);
}
} | 3.68 |
hadoop_BlockData_getState | /**
* Gets the state of the given block.
* @param blockNumber the id of the given block.
* @return the state of the given block.
* @throws IllegalArgumentException if blockNumber is invalid.
*/
public State getState(int blockNumber) {
throwIfInvalidBlockNumber(blockNumber);
return state[blockNumber];
} | 3.68 |
hadoop_MultipartUploaderBuilderImpl_blockSize | /**
* Set block size.
*/
@Override
public B blockSize(long blkSize) {
blockSize = blkSize;
return getThisBuilder();
} | 3.68 |
hbase_ReusableStreamGzipCodec_finish | /**
   * Override because certain implementations call def.end(), which causes problems when resetting
   * the stream for reuse.
*/
@Override
public void finish() throws IOException {
if (HAS_BROKEN_FINISH) {
if (!def.finished()) {
def.finish();
while (!def.finished()) {
int i = def.deflate(this.buf, 0, this.buf.length);
if (def.finished() && (i <= this.buf.length - TRAILER_SIZE)) {
writeTrailer(this.buf, i);
i += TRAILER_SIZE;
out.write(this.buf, 0, i);
return;
}
if (i > 0) {
out.write(this.buf, 0, i);
}
}
byte[] arrayOfByte = new byte[TRAILER_SIZE];
writeTrailer(arrayOfByte, 0);
out.write(arrayOfByte);
}
} else {
super.finish();
}
} | 3.68 |
flink_HiveTableUtil_enableConstraint | // returns a constraint trait that requires ENABLE
public static byte enableConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
} | 3.68 |
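The snippet above just ORs a flag bit into a trait byte. A minimal standalone sketch of that bit-flag pattern follows; the constant's value here is an assumption for illustration, not Flink's actual Hive constant.

```java
public class ConstraintTraitDemo {
    // Hypothetical flag value; the real HIVE_CONSTRAINT_ENABLE lives in Flink's Hive connector.
    static final byte HIVE_CONSTRAINT_ENABLE = 1;

    // Same shape as the snippet above: set the ENABLE bit on the trait byte.
    static byte enableConstraint(byte trait) {
        return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
    }

    // Companion check: test whether the ENABLE bit is set.
    static boolean requiresEnable(byte trait) {
        return (trait & HIVE_CONSTRAINT_ENABLE) != 0;
    }

    public static void main(String[] args) {
        byte trait = 0;
        trait = enableConstraint(trait);
        System.out.println(requiresEnable(trait)); // prints true
    }
}
```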
pulsar_FileUtils_deleteFiles | /**
* Deletes given files.
*
* @param files to delete
* @param recurse will recurse
* @throws IOException if issues deleting files
*/
public static void deleteFiles(final Collection<File> files, final boolean recurse) throws IOException {
for (final File file : files) {
FileUtils.deleteFile(file, recurse);
}
} | 3.68 |
hadoop_QueuePriorityContainerCandidateSelector_preemptionAllowed | /**
   * Checks whether the demandingQueue is allowed to preempt resources from the toBePreemptedQueue.
   *
   * @param demandingQueue demandingQueue
   * @param toBePreemptedQueue toBePreemptedQueue
   * @return true if preemption is allowed, false otherwise
*/
private boolean preemptionAllowed(String demandingQueue,
String toBePreemptedQueue) {
return priorityDigraph.contains(demandingQueue,
toBePreemptedQueue);
} | 3.68 |
hbase_AbstractFSWAL_getNumLogFiles | // public only until class moves to o.a.h.h.wal
/** Returns the number of log files in use */
public int getNumLogFiles() {
// +1 for current use log
return getNumRolledLogFiles() + 1;
} | 3.68 |
hadoop_DataNodeFaultInjector_stripedBlockChecksumReconstruction | /**
* Used as a hook to inject failure in erasure coding checksum reconstruction
* process.
*/
public void stripedBlockChecksumReconstruction() throws IOException {} | 3.68 |
hudi_TableSchemaResolver_getTableAvroSchemaFromLatestCommit | /**
   * Returns the table's latest Avro {@link Schema} iff the table is non-empty (i.e. there is at least
   * a single commit).
   *
   * This method differs from {@link #getTableAvroSchema(boolean)} in that it won't fall back
   * to the schema the table was created with.
*/
public Option<Schema> getTableAvroSchemaFromLatestCommit(boolean includeMetadataFields) throws Exception {
if (metaClient.isTimelineNonEmpty()) {
return getTableAvroSchemaInternal(includeMetadataFields, Option.empty());
}
return Option.empty();
} | 3.68 |
hbase_CandidateGenerator_pickRandomRegion | /**
   * From a list of regions pick a random one. Null can be returned, which
   * {@link StochasticLoadBalancer#balanceCluster(Map)} recognizes as a signal to try a region move
   * rather than a swap.
* @param cluster The state of the cluster
* @param server index of the server
* @param chanceOfNoSwap Chance that this will decide to try a move rather than a swap.
* @return a random {@link RegionInfo} or null if an asymmetrical move is suggested.
*/
int pickRandomRegion(BalancerClusterState cluster, int server, double chanceOfNoSwap) {
// Check to see if this is just a move.
if (
cluster.regionsPerServer[server].length == 0
|| ThreadLocalRandom.current().nextFloat() < chanceOfNoSwap
) {
// signal a move only.
return -1;
}
int rand = ThreadLocalRandom.current().nextInt(cluster.regionsPerServer[server].length);
return cluster.regionsPerServer[server][rand];
} | 3.68 |
framework_Header_setDefaultRow | /**
* Sets the default row of this header. The default row displays column
* captions and sort indicators.
*
* @param defaultRow
* the new default row, or null for no default row
*
* @throws IllegalArgumentException
* if the header does not contain the row
*/
public void setDefaultRow(Row defaultRow) {
if (defaultRow != null) {
if (!getRows().contains(defaultRow)) {
throw new IllegalArgumentException(
"The section does not contain the row");
}
if (defaultRow.isDefault()) {
return;
}
}
getRows().forEach(row -> row.setDefault(row == defaultRow));
markAsDirty();
} | 3.68 |
dubbo_RestProtocol_createReferenceCountedClient | /**
* create rest ReferenceCountedClient
*
* @param url
* @return
* @throws RpcException
*/
private ReferenceCountedClient<? extends RestClient> createReferenceCountedClient(URL url) throws RpcException {
// url -> RestClient
RestClient restClient = clientFactory.createRestClient(url);
return new ReferenceCountedClient<>(restClient, clients, clientFactory, url);
} | 3.68 |
dubbo_AbstractConfigManager_addIfAbsent | /**
* Add config
*
* @param config
* @param configsMap
* @return the existing equivalent config or the new adding config
* @throws IllegalStateException
*/
private <C extends AbstractConfig> C addIfAbsent(C config, Map<String, C> configsMap) throws IllegalStateException {
if (config == null || configsMap == null) {
return config;
}
// find by value
Optional<C> prevConfig = findDuplicatedConfig(configsMap, config);
if (prevConfig.isPresent()) {
return prevConfig.get();
}
String key = config.getId();
if (key == null) {
do {
// generate key if id is not set
key = generateConfigId(config);
} while (configsMap.containsKey(key));
}
C existedConfig = configsMap.get(key);
if (existedConfig != null && !isEquals(existedConfig, config)) {
String type = config.getClass().getSimpleName();
logger.warn(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
String.format(
"Duplicate %s found, there already has one default %s or more than two %ss have the same id, "
+ "you can try to give each %s a different id, override previous config with later config. id: %s, prev: %s, later: %s",
type, type, type, type, key, existedConfig, config));
}
// override existed config if any
configsMap.put(key, config);
return config;
} | 3.68 |
hibernate-validator_MethodInheritanceTree_getAllMethods | /**
* Returns a set containing all the methods of the hierarchy.
*
* @return a set containing all the methods of the hierarchy
*/
public Set<ExecutableElement> getAllMethods() {
return Collections.unmodifiableSet( methodNodeMapping.keySet() );
} | 3.68 |
hadoop_QueueCapacityConfigParser_heterogeneousParser | /**
* A parser method that is usable on resource capacity values e.g. mixed or
* absolute resource.
* @param matcher a regex matcher that contains the matched resource string
* @return a parsed capacity vector
*/
private QueueCapacityVector heterogeneousParser(Matcher matcher) {
QueueCapacityVector capacityVector = QueueCapacityVector.newInstance();
/*
* Absolute resource configuration for a queue will be grouped by "[]".
* Syntax of absolute resource config could be like below
* "memory=4Gi vcores=2". Ideally this means "4GB of memory and 2 vcores".
*/
// Get the sub-group.
String bracketedGroup = matcher.group(0);
// Get the string inside starting and closing []
bracketedGroup = bracketedGroup.substring(1, bracketedGroup.length() - 1);
// Split by comma and equals delimiter eg. the string memory=1024,vcores=6
// is converted to an array of array as {{memory,1024}, {vcores, 6}}
for (String kvPair : bracketedGroup.trim().split(",")) {
String[] splits = kvPair.split("=");
// Ensure that each sub string is key value pair separated by '='.
if (splits.length > 1) {
setCapacityVector(capacityVector, splits[0], splits[1]);
}
}
// Memory always have to be defined
if (capacityVector.getMemory() == 0L) {
return new QueueCapacityVector();
}
return capacityVector;
} | 3.68 |
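As a rough, standalone illustration of the bracketed key=value parsing described above, the sketch below mirrors the substring/split steps with a plain map standing in for the real QueueCapacityVector; it is not YARN code.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class BracketedCapacityParser {
    // Parse a string such as "[memory=1024,vcores=6]" into key/value pairs.
    static Map<String, String> parse(String bracketedGroup) {
        // Strip the leading '[' and trailing ']'.
        String inner = bracketedGroup.substring(1, bracketedGroup.length() - 1);
        Map<String, String> result = new LinkedHashMap<>();
        for (String kvPair : inner.trim().split(",")) {
            String[] splits = kvPair.split("=");
            // Keep only well-formed key=value pairs.
            if (splits.length > 1) {
                result.put(splits[0].trim(), splits[1].trim());
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(parse("[memory=1024,vcores=6]")); // {memory=1024, vcores=6}
    }
}
```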
hbase_FlushPolicyFactory_getFlushPolicyClass | /**
* Get FlushPolicy class for the given table.
*/
public static Class<? extends FlushPolicy> getFlushPolicyClass(TableDescriptor htd,
Configuration conf) throws IOException {
String className = htd.getFlushPolicyClassName();
if (className == null) {
className = conf.get(HBASE_FLUSH_POLICY_KEY, DEFAULT_FLUSH_POLICY_CLASS.getName());
}
try {
Class<? extends FlushPolicy> clazz = Class.forName(className).asSubclass(FlushPolicy.class);
return clazz;
} catch (Exception e) {
LOG.warn("Unable to load configured flush policy '" + className + "' for table '"
+ htd.getTableName() + "', load default flush policy "
+ DEFAULT_FLUSH_POLICY_CLASS.getName() + " instead", e);
return DEFAULT_FLUSH_POLICY_CLASS;
}
} | 3.68 |
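The reflection-with-fallback pattern used here (load the configured class name, fall back to a default when loading or casting fails) can be sketched with plain JDK calls; the interface and class names below are illustrative, not HBase's.

```java
public class PolicyLoader {
    interface Policy {}
    static class DefaultPolicy implements Policy {}

    // Load the configured policy class, falling back to the default on any failure.
    static Class<? extends Policy> load(String className) {
        if (className == null) {
            return DefaultPolicy.class;
        }
        try {
            return Class.forName(className).asSubclass(Policy.class);
        } catch (Exception e) {
            System.err.println("Falling back to default policy: " + e);
            return DefaultPolicy.class;
        }
    }

    public static void main(String[] args) {
        System.out.println(load(null));               // class PolicyLoader$DefaultPolicy
        System.out.println(load("java.lang.String")); // cast fails -> falls back to default
    }
}
```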
framework_HasValue_getComponent | /**
* Returns the component.
*
* @return the component, not null
*/
public Component getComponent() {
return component;
} | 3.68 |
flink_AbstractFileStateBackend_getSavepointPath | /**
* Gets the directory where savepoints are stored by default (when no custom path is given to
* the savepoint trigger command).
*
* @return The default directory for savepoints, or null, if no default directory has been
* configured.
*/
@Nullable
public Path getSavepointPath() {
return baseSavepointPath;
} | 3.68 |
querydsl_QueryModifiers_subList | /**
* Get a sublist based on the restriction of limit and offset
*
* @param <T>
* @param list list to be handled
* @return sublist with limit and offset applied
*/
public <T> List<T> subList(List<T> list) {
if (!list.isEmpty()) {
int from = offset != null ? toInt(offset) : 0;
int to = limit != null ? (from + toInt(limit)) : list.size();
return list.subList(from, Math.min(to,list.size()));
} else {
return list;
}
} | 3.68 |
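Note how the snippet clamps the upper bound with Math.min so an over-long limit never throws. Below is a JDK-only re-sketch of the same limit/offset slicing, under the assumption that offset and limit are simple nullable integers; the names are illustrative, not Querydsl's API.

```java
import java.util.Arrays;
import java.util.List;

public class SubListDemo {
    // Same slicing as above, with offset/limit as nullable Integers for simplicity.
    static <T> List<T> subList(List<T> list, Integer offset, Integer limit) {
        if (list.isEmpty()) {
            return list;
        }
        int from = offset != null ? offset : 0;
        int to = limit != null ? from + limit : list.size();
        return list.subList(from, Math.min(to, list.size()));
    }

    public static void main(String[] args) {
        List<String> rows = Arrays.asList("a", "b", "c", "d", "e");
        System.out.println(subList(rows, 1, 2));  // [b, c]
        System.out.println(subList(rows, 3, 10)); // [d, e]  (upper bound clamped)
    }
}
```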
hadoop_AMWebServices_getJobFromJobIdString | /**
* convert a job id string to an actual job and handle all the error checking.
*/
public static Job getJobFromJobIdString(String jid, AppContext appCtx) throws NotFoundException {
JobId jobId;
Job job;
try {
jobId = MRApps.toJobID(jid);
} catch (YarnRuntimeException e) {
// TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
// anymore but keeping it for now just in case other stuff starts failing.
// Also, the webservice should ideally return BadRequest (HTTP:400) when
// the id is malformed instead of NotFound (HTTP:404). The webserver on
// top of which AMWebServices is built seems to automatically do that for
// unhandled exceptions
throw new NotFoundException(e.getMessage());
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
if (jobId == null) {
throw new NotFoundException("job, " + jid + ", is not found");
}
job = appCtx.getJob(jobId);
if (job == null) {
throw new NotFoundException("job, " + jid + ", is not found");
}
return job;
} | 3.68 |
hbase_RegionStateStore_hasGlobalReplicationScope | // ==========================================================================
// Table Descriptors helpers
// ==========================================================================
private boolean hasGlobalReplicationScope(TableName tableName) throws IOException {
return hasGlobalReplicationScope(getDescriptor(tableName));
} | 3.68 |
morf_SqlDialect_getSqlForLower | /**
* Converts the <code>LOWER</code> function into SQL.
*
* @param function the function to convert.
* @return a string representation of the SQL.
*/
protected String getSqlForLower(Function function) {
return "LOWER(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
flink_OneShotLatch_await | /**
* Waits until {@link OneShotLatch#trigger()} is called. Once {@code #trigger()} has been called
* this call will always return immediately.
*
* <p>If the latch is not triggered within the given timeout, a {@code TimeoutException} will be
* thrown after the timeout.
*
* <p>A timeout value of zero means infinite timeout and make this equivalent to {@link
* #await()}.
*
* @param timeout The value of the timeout, a value of zero indicating infinite timeout.
* @param timeUnit The unit of the timeout
* @throws InterruptedException Thrown if the thread is interrupted while waiting.
* @throws TimeoutException Thrown, if the latch is not triggered within the timeout time.
*/
public void await(long timeout, TimeUnit timeUnit)
throws InterruptedException, TimeoutException {
if (timeout < 0) {
throw new IllegalArgumentException("time may not be negative");
}
if (timeUnit == null) {
throw new NullPointerException("timeUnit");
}
if (timeout == 0) {
await();
} else {
final long deadline = System.nanoTime() + timeUnit.toNanos(timeout);
long millisToWait;
synchronized (lock) {
while (!triggered
&& (millisToWait = (deadline - System.nanoTime()) / 1_000_000) > 0) {
lock.wait(millisToWait);
}
if (!triggered) {
throw new TimeoutException();
}
}
}
} | 3.68 |
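The body follows the standard timed-wait idiom: compute the deadline once with System.nanoTime(), then re-check the condition in a loop around lock.wait(). A minimal JDK-only latch built on the same idiom is sketched below; it is not Flink's OneShotLatch.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SimpleOneShotLatch {
    private final Object lock = new Object();
    private boolean triggered;

    /** Releases all current and future waiters. */
    public void trigger() {
        synchronized (lock) {
            triggered = true;
            lock.notifyAll();
        }
    }

    /** Timed wait: deadline computed once, condition re-checked in a loop (assumes timeout > 0). */
    public void await(long timeout, TimeUnit timeUnit)
            throws InterruptedException, TimeoutException {
        final long deadline = System.nanoTime() + timeUnit.toNanos(timeout);
        synchronized (lock) {
            long millisToWait;
            while (!triggered
                    && (millisToWait = (deadline - System.nanoTime()) / 1_000_000) > 0) {
                lock.wait(millisToWait);
            }
            if (!triggered) {
                throw new TimeoutException();
            }
        }
    }
}
```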
hbase_MetaTableAccessor_getScanForTableName | /**
* This method creates a Scan object that will only scan catalog rows that belong to the specified
* table. It doesn't specify any columns. This is a better alternative to just using a start row
* and scan until it hits a new table since that requires parsing the HRI to get the table name.
* @param tableName bytes of table's name
* @return configured Scan object
*/
public static Scan getScanForTableName(Configuration conf, TableName tableName) {
// Start key is just the table name with delimiters
byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION);
// Stop key appends the smallest possible char to the table name
byte[] stopKey = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION);
Scan scan = getMetaScan(conf, -1);
scan.withStartRow(startKey);
scan.withStopRow(stopKey);
return scan;
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_build | /**
* Build the IOStatistics instance.
* @return an instance.
* @throws IllegalStateException if the builder has already been built.
*/
public IOStatistics build() {
final DynamicIOStatistics stats = activeInstance();
// stop the builder from working any more.
instance = null;
return stats;
} | 3.68 |
hbase_HbckChore_scanForMergedParentRegions | /**
* Scan hbase:meta to get set of merged parent regions, this is a very heavy scan.
* @return Return generated {@link HashSet}
*/
private HashSet<String> scanForMergedParentRegions() throws IOException {
HashSet<String> mergedParentRegions = new HashSet<>();
// Null tablename means scan all of meta.
MetaTableAccessor.scanMetaForTableRegions(this.master.getConnection(), r -> {
List<RegionInfo> mergeParents = CatalogFamilyFormat.getMergeRegions(r.rawCells());
if (mergeParents != null) {
for (RegionInfo mergeRegion : mergeParents) {
if (mergeRegion != null) {
// This region is already being merged
mergedParentRegions.add(mergeRegion.getEncodedName());
}
}
}
return true;
}, null);
return mergedParentRegions;
} | 3.68 |
AreaShop_RegionGroup_setSetting | /**
* Set a setting of this group.
* @param path The path to set
* @param setting The value to set
*/
public void setSetting(String path, Object setting) {
plugin.getFileManager().setGroupSetting(this, path, setting);
} | 3.68 |
dubbo_ConfigValidationUtils_checkMultiExtension | /**
 * Check whether there is an <code>Extension</code> whose name (property) is <code>value</code> (special treatment is
 * required).
*
* @param type The Extension type
* @param property The extension key
* @param value The Extension name
*/
public static void checkMultiExtension(ScopeModel scopeModel, Class<?> type, String property, String value) {
checkMultiExtension(scopeModel, Collections.singletonList(type), property, value);
} | 3.68 |
pulsar_ComponentImpl_isAuthorizedRole | /**
* @deprecated use {@link #isAuthorizedRole(String, String, AuthenticationParameters)} instead.
*/
@Deprecated
public boolean isAuthorizedRole(String tenant, String namespace, String clientRole,
AuthenticationDataSource authenticationData) throws PulsarAdminException {
AuthenticationParameters authParams = AuthenticationParameters.builder().clientRole(clientRole)
.clientAuthenticationDataSource(authenticationData).build();
return isAuthorizedRole(tenant, namespace, authParams);
} | 3.68 |
hbase_JVM_isLinux | /**
* Check if the OS is linux.
* @return whether this is linux or not.
*/
public static boolean isLinux() {
return linux;
} | 3.68 |
hbase_MetricsAssignmentManager_updateRITCount | /**
* set new value for number of regions in transition.
*/
public void updateRITCount(final int ritCount) {
assignmentManagerSource.setRIT(ritCount);
} | 3.68 |
flink_FlinkImageBuilder_setJavaVersion | /**
* Sets JDK version in the image.
*
* <p>This version string will be used as the tag of openjdk image. If version is not specified,
* the JDK version of the current JVM will be used.
*
* @see <a href="https://hub.docker.com/_/openjdk">OpenJDK on Docker Hub</a> for all available
* tags.
*/
public FlinkImageBuilder setJavaVersion(String javaVersion) {
this.javaVersion = javaVersion;
return this;
} | 3.68 |
hadoop_ExitUtil_haltOnOutOfMemory | /**
   * Handler for out of memory events. No attempt is made here
   * to shut down cleanly or support halt blocking; a robust
   * printing of the event to stderr is all that can be done.
* @param oome out of memory event
*/
public static void haltOnOutOfMemory(OutOfMemoryError oome) {
//After catching an OOM java says it is undefined behavior, so don't
//even try to clean up or we can get stuck on shutdown.
try {
System.err.println("Halting due to Out Of Memory Error...");
} catch (Throwable err) {
      //Again we don't want to exit because of logging issues.
}
Runtime.getRuntime().halt(-1);
} | 3.68 |
hbase_OrderedBytes_isFixedFloat64 | /**
* Return true when the next encoded value in {@code src} uses fixed-width Float64 encoding, false
* otherwise.
*/
public static boolean isFixedFloat64(PositionedByteRange src) {
return FIXED_FLOAT64
== (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.68 |
morf_MergeStatementBuilder_from | /**
* Specifies the select statement to use as a source of the data.
*
* @param statement the source statement.
* @return this, for method chaining.
*/
public MergeStatementBuilder from(SelectStatement statement) {
if (statement.getOrderBys().size() != 0) {
throw new IllegalArgumentException("ORDER BY is not permitted in the SELECT part of a merge statement (SQL Server limitation)");
}
this.selectStatement = statement;
return this;
} | 3.68 |
hbase_TableSchemaModel_getTableDescriptor | /** Returns a table descriptor */
@JsonIgnore
public TableDescriptor getTableDescriptor() {
TableDescriptorBuilder tableDescriptorBuilder =
TableDescriptorBuilder.newBuilder(TableName.valueOf(getName()));
for (Map.Entry<QName, Object> e : getAny().entrySet()) {
tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
for (ColumnSchemaModel column : getColumns()) {
ColumnFamilyDescriptorBuilder cfdb =
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column.getName()));
for (Map.Entry<QName, Object> e : column.getAny().entrySet()) {
cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
tableDescriptorBuilder.setColumnFamily(cfdb.build());
}
return tableDescriptorBuilder.build();
} | 3.68 |
hudi_HoodieMultiTableStreamer_sync | /**
* Creates actual HoodieDeltaStreamer objects for every table/topic and does incremental sync.
*/
public void sync() {
for (TableExecutionContext context : tableExecutionContexts) {
try {
new HoodieStreamer(context.getConfig(), jssc, Option.ofNullable(context.getProperties())).sync();
successTables.add(Helpers.getTableWithDatabase(context));
} catch (Exception e) {
logger.error("error while running MultiTableDeltaStreamer for table: " + context.getTableName(), e);
failedTables.add(Helpers.getTableWithDatabase(context));
}
}
logger.info("Ingestion was successful for topics: " + successTables);
if (!failedTables.isEmpty()) {
logger.info("Ingestion failed for topics: " + failedTables);
}
} | 3.68 |
flink_PlanProjectOperator_map | // TODO We should use code generation for this.
@SuppressWarnings("unchecked")
@Override
public R map(Tuple inTuple) throws Exception {
for (int i = 0; i < fields.length; i++) {
outTuple.setField(inTuple.getField(fields[i]), i);
}
return (R) outTuple;
} | 3.68 |
hadoop_SnappyCodec_setConf | /**
* Set the configuration to be used by this object.
*
* @param conf the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
} | 3.68 |
flink_DataSet_partitionByRange | /**
* Range-partitions a DataSet using the specified KeySelector.
*
* <p><b>Important:</b>This operation requires an extra pass over the DataSet to compute the
* range boundaries and shuffles the whole DataSet over the network. This can take significant
* amount of time.
*
* @param keyExtractor The KeyExtractor with which the DataSet is range-partitioned.
* @return The partitioned DataSet.
* @see KeySelector
*/
public <K extends Comparable<K>> PartitionOperator<T> partitionByRange(
KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType =
TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
return new PartitionOperator<>(
this,
PartitionMethod.RANGE,
new Keys.SelectorFunctionKeys<>(clean(keyExtractor), this.getType(), keyType),
Utils.getCallLocationName());
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableCurSizeActiveMemTable | /** Returns approximate size of active memtable (bytes). */
public void enableCurSizeActiveMemTable() {
this.properties.add(RocksDBProperty.CurSizeActiveMemTable.getRocksDBProperty());
} | 3.68 |
morf_LoggingSqlScriptVisitor_beforeExecute | /**
* {@inheritDoc}
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#beforeExecute(java.lang.String)
*/
@Override
public void beforeExecute(String sql) {
log.info(logSchemaPositionPrefix() + "Executing [" + sql + "]");
} | 3.68 |
hadoop_LongLong_multiplication | /** Compute a*b and store the result to r.
* @return r
*/
static LongLong multiplication(final LongLong r, final long a, final long b) {
/*
final long x0 = a & LOWER_MASK;
final long x1 = (a & UPPER_MASK) >> MID;
final long y0 = b & LOWER_MASK;
final long y1 = (b & UPPER_MASK) >> MID;
final long t = (x0 + x1)*(y0 + y1);
final long u = (x0 - x1)*(y0 - y1);
final long v = x1*y1;
final long tmp = (t - u)>>>1;
result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK);
result.d1 = v + (tmp >> MID);
return result;
*/
final long a_lower = a & LOWER_MASK;
final long a_upper = (a & UPPER_MASK) >> MID;
final long b_lower = b & LOWER_MASK;
final long b_upper = (b & UPPER_MASK) >> MID;
final long tmp = a_lower*b_upper + a_upper*b_lower;
r.d0 = a_lower*b_lower + ((tmp << MID) & FULL_MASK);
r.d1 = a_upper*b_upper + (tmp >> MID);
return r;
} | 3.68 |
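The active branch is a schoolbook 32-bit split: a*b = aLo*bLo + (aLo*bHi + aHi*bLo) * 2^32 + aHi*bHi * 2^64. That identity can be checked independently with BigInteger, as in the standalone sketch below; it does not touch Hadoop's LongLong or its mask constants.

```java
import java.math.BigInteger;

public class SplitMultiplyDemo {
    public static void main(String[] args) {
        // Two positive 64-bit values so BigInteger.valueOf matches their intended magnitude.
        long a = 0x1234_5678_9ABC_DEF0L;
        long b = 0x0FED_CBA9_8765_4321L;

        long aLo = a & 0xFFFF_FFFFL, aHi = a >>> 32;
        long bLo = b & 0xFFFF_FFFFL, bHi = b >>> 32;

        BigInteger expected = BigInteger.valueOf(a).multiply(BigInteger.valueOf(b));

        // Rebuild the product from the four 32-bit partial products.
        BigInteger rebuilt = BigInteger.valueOf(aLo).multiply(BigInteger.valueOf(bLo))
                .add(BigInteger.valueOf(aLo).multiply(BigInteger.valueOf(bHi))
                        .add(BigInteger.valueOf(aHi).multiply(BigInteger.valueOf(bLo)))
                        .shiftLeft(32))
                .add(BigInteger.valueOf(aHi).multiply(BigInteger.valueOf(bHi)).shiftLeft(64));

        System.out.println(rebuilt.equals(expected)); // prints true
    }
}
```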
hadoop_SuccessData_putDiagnostic | /**
* Add a diagnostics entry.
* @param key name
* @param value value
*/
public void putDiagnostic(String key, String value) {
diagnostics.put(key, value);
} | 3.68 |
hbase_StripeStoreFileManager_updateCandidateFilesForRowKeyBefore | /**
* See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and
* {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} for
   * details on these methods.
*/
@Override
public Iterator<HStoreFile> updateCandidateFilesForRowKeyBefore(
Iterator<HStoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate) {
KeyBeforeConcatenatedLists.Iterator original =
(KeyBeforeConcatenatedLists.Iterator) candidateFiles;
assert original != null;
ArrayList<List<HStoreFile>> components = original.getComponents();
for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) {
HStoreFile sf = components.get(firstIrrelevant).get(0);
byte[] endKey = endOf(sf);
// Entries are ordered as such: L0, then stripes in reverse order. We never remove
// level 0; we remove the stripe, and all subsequent ones, as soon as we find the
// first one that cannot possibly have better candidates.
if (!isInvalid(endKey) && !isOpen(endKey) && (nonOpenRowCompare(targetKey, endKey) >= 0)) {
original.removeComponents(firstIrrelevant);
break;
}
}
return original;
} | 3.68 |
hbase_HFileBlock_totalChecksumBytes | /**
* Return the number of bytes required to store all the checksums for this block. Each checksum
* value is a 4 byte integer. <br/>
* NOTE: ByteBuff returned by {@link HFileBlock#getBufferWithoutHeader()} and
* {@link HFileBlock#getBufferReadOnly} or DataInputStream returned by
* {@link HFileBlock#getByteStream()} does not include checksum.
*/
int totalChecksumBytes() {
return totalChecksumBytes;
} | 3.68 |
pulsar_MultiTopicsConsumerImpl_unsubscribeAsync | // un-subscribe a given topic
public CompletableFuture<Void> unsubscribeAsync(String topicName) {
checkArgument(TopicName.isValid(topicName), "Invalid topic name:" + topicName);
if (getState() == State.Closing || getState() == State.Closed) {
return FutureUtil.failedFuture(
new PulsarClientException.AlreadyClosedException("Topics Consumer was already closed"));
}
if (partitionsAutoUpdateTimeout != null) {
partitionsAutoUpdateTimeout.cancel();
partitionsAutoUpdateTimeout = null;
}
CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>();
String topicPartName = TopicName.get(topicName).getPartitionedTopicName();
List<ConsumerImpl<T>> consumersToUnsub = consumers.values().stream()
.filter(consumer -> {
String consumerTopicName = consumer.getTopic();
return TopicName.get(consumerTopicName).getPartitionedTopicName().equals(topicPartName);
}).collect(Collectors.toList());
List<CompletableFuture<Void>> futureList = consumersToUnsub.stream()
.map(ConsumerImpl::unsubscribeAsync).collect(Collectors.toList());
FutureUtil.waitForAll(futureList)
.whenComplete((r, ex) -> {
if (ex == null) {
consumersToUnsub.forEach(consumer1 -> {
consumers.remove(consumer1.getTopic());
pausedConsumers.remove(consumer1);
allTopicPartitionsNumber.decrementAndGet();
});
removeTopic(topicName);
if (unAckedMessageTracker instanceof UnAckedTopicMessageTracker) {
((UnAckedTopicMessageTracker) unAckedMessageTracker).removeTopicMessages(topicName);
}
unsubscribeFuture.complete(null);
log.info("[{}] [{}] [{}] Unsubscribed Topics Consumer, allTopicPartitionsNumber: {}",
topicName, subscription, consumerName, allTopicPartitionsNumber);
} else {
unsubscribeFuture.completeExceptionally(ex);
setState(State.Failed);
log.error("[{}] [{}] [{}] Could not unsubscribe Topics Consumer",
topicName, subscription, consumerName, ex.getCause());
}
});
return unsubscribeFuture;
} | 3.68 |
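FutureUtil.waitForAll is Pulsar's helper around the standard pattern of completing an aggregate future when every child completes and failing it if any child fails. A JDK-only sketch of that pattern (not Pulsar code):

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class WaitForAllDemo {
    // Aggregate a list of futures into one future that completes when all of them complete.
    static CompletableFuture<Void> waitForAll(List<CompletableFuture<Void>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    }

    public static void main(String[] args) {
        CompletableFuture<Void> a = CompletableFuture.runAsync(() -> { /* unsubscribe consumer a */ });
        CompletableFuture<Void> b = CompletableFuture.runAsync(() -> { /* unsubscribe consumer b */ });

        CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>();
        waitForAll(Arrays.asList(a, b)).whenComplete((r, ex) -> {
            if (ex == null) {
                unsubscribeFuture.complete(null);            // all children succeeded
            } else {
                unsubscribeFuture.completeExceptionally(ex); // at least one child failed
            }
        });
        unsubscribeFuture.join();
        System.out.println("all unsubscribed (simulated)");
    }
}
```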
dubbo_DubboSpringInitContext_setKeepRunningOnSpringClosed | /**
* Keep Dubbo running when spring is stopped
* @param keepRunningOnSpringClosed
*/
public void setKeepRunningOnSpringClosed(boolean keepRunningOnSpringClosed) {
this.setModuleAttribute(ModelConstants.KEEP_RUNNING_ON_SPRING_CLOSED, keepRunningOnSpringClosed);
} | 3.68 |
hadoop_WriteManager_commitBeforeRead | // Do a possible commit before read request in case there is buffered data
// inside DFSClient which has been flushed but not synced.
int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
long commitOffset) {
int status;
OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("No opened stream for fileId: " + fileHandle.dumpFileHandle()
+ " commitOffset=" + commitOffset
+ ". Return success in this case.");
}
status = Nfs3Status.NFS3_OK;
} else {
      // commit request triggered by read won't create a pending commit obj
COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
null, 0, null, true);
switch (ret) {
case COMMIT_FINISHED:
case COMMIT_INACTIVE_CTX:
status = Nfs3Status.NFS3_OK;
break;
case COMMIT_INACTIVE_WITH_PENDING_WRITE:
case COMMIT_ERROR:
status = Nfs3Status.NFS3ERR_IO;
break;
case COMMIT_WAIT:
case COMMIT_SPECIAL_WAIT:
/**
         * This should happen rarely in some possible cases, such as a read
         * request arriving before DFSClient is able to quickly flush data to the DN,
         * or prerequisite writes not being available. Won't wait since we don't
         * want to block the read.
*/
status = Nfs3Status.NFS3ERR_JUKEBOX;
break;
case COMMIT_SPECIAL_SUCCESS:
// Read beyond eof could result in partial read
status = Nfs3Status.NFS3_OK;
break;
default:
LOG.error("Should not get commit return code: " + ret.name());
throw new RuntimeException("Should not get commit return code: "
+ ret.name());
}
}
return status;
} | 3.68 |
flink_DualInputOperator_getOperatorInfo | /** Gets the information about the operators input/output types. */
@Override
@SuppressWarnings("unchecked")
public BinaryOperatorInformation<IN1, IN2, OUT> getOperatorInfo() {
return (BinaryOperatorInformation<IN1, IN2, OUT>) this.operatorInfo;
} | 3.68 |
hbase_AbstractViolationPolicyEnforcement_getFileSize | /**
* Computes the size of a single file on the filesystem. If the size cannot be computed for some
* reason, a {@link SpaceLimitingException} is thrown, as the file may violate a quota. If the
* provided path does not reference a file, an {@link IllegalArgumentException} is thrown.
* @param fs The FileSystem which the path refers to a file upon
* @param path The path on the {@code fs} to a file whose size is being checked
* @return The size in bytes of the file
*/
long getFileSize(FileSystem fs, String path) throws SpaceLimitingException {
final FileStatus status;
try {
status = fs.getFileStatus(new Path(Objects.requireNonNull(path)));
} catch (IOException e) {
throw new SpaceLimitingException(getPolicyName(),
"Could not verify length of file to bulk load: " + path, e);
}
if (!status.isFile()) {
throw new IllegalArgumentException(path + " is not a file.");
}
return status.getLen();
} | 3.68 |
zxing_CharacterSetECI_getCharacterSetECIByValue | /**
* @param value character set ECI value
* @return {@code CharacterSetECI} representing ECI of given value, or null if it is legal but
* unsupported
* @throws FormatException if ECI value is invalid
*/
public static CharacterSetECI getCharacterSetECIByValue(int value) throws FormatException {
if (value < 0 || value >= 900) {
throw FormatException.getFormatInstance();
}
return VALUE_TO_ECI.get(value);
} | 3.68 |
flink_RpcUtils_createRemoteRpcService | /**
* Convenient shortcut for constructing a remote RPC Service that takes care of checking for
* null and empty optionals.
*
* @see RpcSystem#remoteServiceBuilder(Configuration, String, String)
*/
public static RpcService createRemoteRpcService(
RpcSystem rpcSystem,
Configuration configuration,
@Nullable String externalAddress,
String externalPortRange,
@Nullable String bindAddress,
@SuppressWarnings("OptionalUsedAsFieldOrParameterType") Optional<Integer> bindPort)
throws Exception {
RpcSystem.RpcServiceBuilder rpcServiceBuilder =
rpcSystem.remoteServiceBuilder(configuration, externalAddress, externalPortRange);
if (bindAddress != null) {
rpcServiceBuilder = rpcServiceBuilder.withBindAddress(bindAddress);
}
if (bindPort.isPresent()) {
rpcServiceBuilder = rpcServiceBuilder.withBindPort(bindPort.get());
}
return rpcServiceBuilder.createAndStart();
} | 3.68 |
dubbo_AbstractDirectory_getCheckConnectivityPermit | /**
* for ut only
*/
@Deprecated
public Semaphore getCheckConnectivityPermit() {
return checkConnectivityPermit;
} | 3.68 |
framework_VCalendarPanel_setFocusChangeListener | /**
* The given FocusChangeListener is notified when the focused date changes
* by user either clicking on a new date or by using the keyboard.
*
* @param listener
* The FocusChangeListener to be notified
*/
public void setFocusChangeListener(FocusChangeListener listener) {
focusChangeListener = listener;
} | 3.68 |
hbase_MetricsTableRequests_updateCheckAndMutate | /**
* Update the CheckAndMutate time histogram.
* @param time time it took
*/
public void updateCheckAndMutate(long time, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
checkAndMutateTimeHistogram.update(time);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
checkAndMutateBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.68 |
morf_XmlDataSetProducer_isTableEmpty | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#isTableEmpty(java.lang.String)
*/
@Override
public boolean isTableEmpty(String tableName) {
final InputStream inputStream = xmlStreamProvider.openInputStreamForTable(tableName);
try {
final XMLStreamReader pullParser = openPullParser(inputStream);
PullProcessorRecordIterator pullProcessorRecordIterator = new PullProcessorRecordIterator(pullParser);
return !pullProcessorRecordIterator.hasNext();
} finally {
Closeables.closeQuietly(inputStream);
}
} | 3.68 |
hadoop_AllocateResponse_amRmToken | /**
* Set the <code>amRmToken</code> of the response.
* @see AllocateResponse#setAMRMToken(Token)
* @param amRmToken <code>amRmToken</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder amRmToken(Token amRmToken) {
allocateResponse.setAMRMToken(amRmToken);
return this;
} | 3.68 |
flink_RocksDBStateDownloader_transferAllStateDataToDirectoryAsync | /** Asynchronously runs the specified download requests on executorService. */
private Stream<CompletableFuture<Void>> transferAllStateDataToDirectoryAsync(
Collection<StateHandleDownloadSpec> handleWithPaths,
CloseableRegistry closeableRegistry) {
return handleWithPaths.stream()
.flatMap(
downloadRequest ->
// Take all files from shared and private state.
Streams.concat(
downloadRequest.getStateHandle().getSharedState()
.stream(),
downloadRequest.getStateHandle().getPrivateState()
.stream())
.map(
// Create one runnable for each StreamStateHandle
entry -> {
String localPath = entry.getLocalPath();
StreamStateHandle remoteFileHandle =
entry.getHandle();
Path downloadDest =
downloadRequest
.getDownloadDestination()
.resolve(localPath);
return ThrowingRunnable.unchecked(
() ->
downloadDataForStateHandle(
downloadDest,
remoteFileHandle,
closeableRegistry));
}))
.map(runnable -> CompletableFuture.runAsync(runnable, executorService));
} | 3.68 |
framework_AbstractInMemoryContainer_isFiltered | /**
     * Returns true if the container has active filters.
*
* @return true if the container is currently filtered
*/
protected boolean isFiltered() {
return filteredItemIds != null;
} | 3.68 |
hbase_MetricsMaster_setNumNamespacesInSpaceQuotaViolation | /**
* Sets the number of namespaces in violation of a space quota.
* @see MetricsMasterQuotaSource#updateNumNamespacesInSpaceQuotaViolation(long)
*/
public void setNumNamespacesInSpaceQuotaViolation(final long numNamespacesInViolation) {
masterQuotaSource.updateNumNamespacesInSpaceQuotaViolation(numNamespacesInViolation);
} | 3.68 |
hadoop_DNSOperationsFactory_createInstance | /**
* Create and initialize a registry operations instance.
* Access rights will be determined from the configuration.
*
* @param name name of the instance
* @param impl the DNS implementation.
* @param conf configuration
* @return a registry operations instance
*/
public static DNSOperations createInstance(String name,
DNSImplementation impl,
Configuration conf) {
Preconditions.checkArgument(conf != null, "Null configuration");
DNSOperations operations = null;
switch (impl) {
case DNSJAVA:
operations = new RegistryDNS(name);
break;
default:
throw new IllegalArgumentException(
String.format("%s is not available", impl.toString()));
}
//operations.init(conf);
return operations;
} | 3.68 |
flink_PhysicalFile_deleteIfNecessary | /**
* Delete this physical file if there is no reference count from logical files (all discarded),
* and this physical file is closed (no further writing on it).
*
* @throws IOException if anything goes wrong with file system.
*/
public void deleteIfNecessary() throws IOException {
synchronized (this) {
if (!isOpen() && !deleted && this.logicalFileRefCount.get() <= 0) {
if (outputStream != null) {
try {
outputStream.close();
} catch (IOException e) {
LOG.warn("Fail to close output stream when deleting file: {}", filePath);
}
}
if (deleter != null) {
deleter.perform(filePath);
}
this.deleted = true;
}
}
} | 3.68 |
framework_EditorImpl_doEdit | /**
* Handles editor component generation and adding them to the hierarchy of
* the Grid.
*
* @param bean
* the edited item; can't be {@code null}
*/
protected void doEdit(T bean) {
Objects.requireNonNull(bean, "Editor can't edit null");
if (!isEnabled()) {
throw new IllegalStateException(
"Editing is not allowed when Editor is disabled.");
}
if (!isBuffered()) {
binder.setBean(bean);
} else {
binder.readBean(bean);
}
edited = bean;
getParent().getColumns().stream().filter(Column::isEditable)
.forEach(c -> {
Binding<T, ?> binding = c.getEditorBinding();
assert binding
.getField() instanceof Component : "Grid should enforce that the binding field is a component";
Component component = (Component) binding.getField();
addComponentToGrid(component);
columnFields.put(c, component);
getState().columnFields.put(getInternalIdForColumn(c),
component.getConnectorId());
});
eventRouter.fireEvent(new EditorOpenEvent<T>(this, edited));
} | 3.68 |
hudi_HoodieTableMetaClient_getFunctionalIndexMetadata | /**
* Returns Option of {@link HoodieFunctionalIndexMetadata} from index definition file if present, else returns empty Option.
*/
public Option<HoodieFunctionalIndexMetadata> getFunctionalIndexMetadata() {
if (functionalIndexMetadata.isPresent()) {
return functionalIndexMetadata;
}
if (tableConfig.getIndexDefinitionPath().isPresent() && StringUtils.nonEmpty(tableConfig.getIndexDefinitionPath().get())) {
Path indexDefinitionPath = new Path(tableConfig.getIndexDefinitionPath().get());
try {
return Option.of(HoodieFunctionalIndexMetadata.fromJson(new String(FileIOUtils.readDataFromPath(fs, indexDefinitionPath).get())));
} catch (IOException e) {
throw new HoodieIOException("Could not load functional index metadata at path: " + tableConfig.getIndexDefinitionPath().get(), e);
}
}
return Option.empty();
} | 3.68 |
flink_FlinkResource_get | /**
* Returns the configured FlinkResource implementation, or a {@link
* LocalStandaloneFlinkResource} if none is configured.
*
* @param setup setup instructions for the FlinkResource
     * @return configured FlinkResource, or {@link LocalStandaloneFlinkResource} if none is
* configured
*/
static FlinkResource get(FlinkResourceSetup setup) {
return FactoryUtils.loadAndInvokeFactory(
FlinkResourceFactory.class,
factory -> factory.create(setup),
LocalStandaloneFlinkResourceFactory::new);
} | 3.68 |
hbase_StoreFileInfo_getReferredToRegionAndFile | /*
* Return region and file name referred to by a Reference.
* @param referenceFile HFile name which is a Reference.
* @return Calculated referenced region and file name.
* @throws IllegalArgumentException when referenceFile regex fails to match.
*/
public static Pair<String, String> getReferredToRegionAndFile(final String referenceFile) {
Matcher m = REF_NAME_PATTERN.matcher(referenceFile);
if (m == null || !m.matches()) {
LOG.warn("Failed match of store file name {}", referenceFile);
throw new IllegalArgumentException("Failed match of store file name " + referenceFile);
}
String referencedRegion = m.group(2);
String referencedFile = m.group(1);
LOG.trace("reference {} to region={} file={}", referenceFile, referencedRegion, referencedFile);
return new Pair<>(referencedRegion, referencedFile);
} | 3.68 |
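The real REF_NAME_PATTERN is not shown in the snippet; as a hedged illustration of the same group-extraction idea, the sketch below assumes reference file names look like "<hfile>.<encodedRegionName>", which is a simplification, not HBase's actual pattern.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ReferenceNameDemo {
    // Hypothetical simplification of the reference-file naming scheme.
    private static final Pattern REF_NAME = Pattern.compile("^([0-9a-f]+)\\.(.+)$");

    public static void main(String[] args) {
        Matcher m = REF_NAME.matcher("d41d8cd98f00b204e9800998ecf8427e.1234abcd");
        if (m.matches()) {
            System.out.println("file   = " + m.group(1)); // d41d8cd98f00b204e9800998ecf8427e
            System.out.println("region = " + m.group(2)); // 1234abcd
        }
    }
}
```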
flink_RocksDBIncrementalRestoreOperation_createColumnFamilyDescriptors | /**
* This method recreates and registers all {@link ColumnFamilyDescriptor} from Flink's state
* meta data snapshot.
*/
private List<ColumnFamilyDescriptor> createColumnFamilyDescriptors(
List<StateMetaInfoSnapshot> stateMetaInfoSnapshots, boolean registerTtlCompactFilter) {
List<ColumnFamilyDescriptor> columnFamilyDescriptors =
new ArrayList<>(stateMetaInfoSnapshots.size());
for (StateMetaInfoSnapshot stateMetaInfoSnapshot : stateMetaInfoSnapshots) {
RegisteredStateMetaInfoBase metaInfoBase =
RegisteredStateMetaInfoBase.fromMetaInfoSnapshot(stateMetaInfoSnapshot);
ColumnFamilyDescriptor columnFamilyDescriptor =
RocksDBOperationUtils.createColumnFamilyDescriptor(
metaInfoBase,
this.rocksHandle.getColumnFamilyOptionsFactory(),
registerTtlCompactFilter
? this.rocksHandle.getTtlCompactFiltersManager()
: null,
this.rocksHandle.getWriteBufferManagerCapacity());
columnFamilyDescriptors.add(columnFamilyDescriptor);
}
return columnFamilyDescriptors;
} | 3.68 |
framework_ReverseConverter_convertToModel | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#convertToModel(java
* .lang.Object, java.util.Locale)
*/
@Override
public MODEL convertToModel(PRESENTATION value,
Class<? extends MODEL> targetType, Locale locale)
throws ConversionException {
return realConverter.convertToPresentation(value, targetType, locale);
} | 3.68 |
hmily_GsonUtils_getType | /**
* Get JsonElement class type.
*
* @param element the element
* @return Class class
*/
public Class getType(final JsonElement element) {
if (!element.isJsonPrimitive()) {
return element.getClass();
}
final JsonPrimitive primitive = element.getAsJsonPrimitive();
if (primitive.isString()) {
return String.class;
} else if (primitive.isNumber()) {
String numStr = primitive.getAsString();
if (numStr.contains(DOT) || numStr.contains(E)
|| numStr.contains("E")) {
return Double.class;
}
return Long.class;
} else if (primitive.isBoolean()) {
return Boolean.class;
} else {
return element.getClass();
}
} | 3.68 |
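A short usage sketch of the classification above, assuming Gson 2.8.6+ (where JsonParser.parseString is available) and assuming DOT and E are the "." and "e" string constants; neither assumption is confirmed by the snippet itself.

```java
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;

public class GsonTypeDemo {
    // Simplified version of the classification above, with the assumed constants inlined.
    static Class<?> typeOf(JsonElement element) {
        if (!element.isJsonPrimitive()) {
            return element.getClass();
        }
        JsonPrimitive primitive = element.getAsJsonPrimitive();
        if (primitive.isString()) {
            return String.class;
        } else if (primitive.isNumber()) {
            String numStr = primitive.getAsString();
            return (numStr.contains(".") || numStr.contains("e") || numStr.contains("E"))
                    ? Double.class : Long.class;
        } else if (primitive.isBoolean()) {
            return Boolean.class;
        }
        return element.getClass();
    }

    public static void main(String[] args) {
        System.out.println(typeOf(JsonParser.parseString("42")));     // class java.lang.Long
        System.out.println(typeOf(JsonParser.parseString("3.14")));   // class java.lang.Double
        System.out.println(typeOf(JsonParser.parseString("\"hi\""))); // class java.lang.String
        System.out.println(typeOf(JsonParser.parseString("true")));   // class java.lang.Boolean
    }
}
```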
hadoop_KerberosSecurityTestcase_createMiniKdcConf | /**
* Create a Kdc configuration
*/
public void createMiniKdcConf() {
conf = MiniKdc.createConf();
} | 3.68 |
pulsar_ProxyService_shutdownEventLoop | // Shutdown the event loop.
// If graceful is true, will wait for the current requests to be completed, up to 15 seconds.
// Graceful shutdown can be disabled by setting the gracefulShutdown flag to false. This is used in tests
// to speed up the shutdown process.
private Future<?> shutdownEventLoop(EventLoopGroup eventLoop) {
if (gracefulShutdown) {
return eventLoop.shutdownGracefully();
} else {
return eventLoop.shutdownGracefully(0, 0, TimeUnit.SECONDS);
}
} | 3.68 |
hbase_HFileInfo_parsePB | /**
* Fill our map with content of the pb we read off disk
* @param fip protobuf message to read
*/
void parsePB(final HFileProtos.FileInfoProto fip) {
this.map.clear();
for (BytesBytesPair pair : fip.getMapEntryList()) {
this.map.put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray());
}
} | 3.68 |
hbase_MultiTableInputFormatBase_setTableRecordReader | /**
* Allows subclasses to set the {@link TableRecordReader}.
* @param tableRecordReader A different {@link TableRecordReader} implementation.
*/
protected void setTableRecordReader(TableRecordReader tableRecordReader) {
this.tableRecordReader = tableRecordReader;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_isActive | /**
* Is the span active?
* @return true if this span is the active one for the current thread.
*/
private boolean isActive() {
return this == getActiveAuditSpan();
} | 3.68 |
hadoop_ResourcePool_close | /**
* Derived classes may implement a way to cleanup each item.
*
* @param item the resource to close.
*/
protected void close(T item) {
// Do nothing in this class. Allow overriding classes to take any cleanup action.
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations16 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations16() {
String result = testDialect.getSqlFrom(field("a").plus(field("b")).plus(field("c")).divideBy(literal(2)).plus(field("z")));
assertEquals(expectedSqlForMathOperations16(), result);
} | 3.68 |
flink_CheckpointStatsHistory_createSnapshot | /**
* Creates a snapshot of the current state.
*
* @return Snapshot of the current state.
*/
CheckpointStatsHistory createSnapshot() {
if (readOnly) {
throw new UnsupportedOperationException(
"Can't create a snapshot of a read-only history.");
}
List<AbstractCheckpointStats> checkpointsHistory;
Map<Long, AbstractCheckpointStats> checkpointsById;
checkpointsById = CollectionUtil.newHashMapWithExpectedSize(checkpointsArray.length);
if (maxSize == 0) {
checkpointsHistory = Collections.emptyList();
} else {
AbstractCheckpointStats[] newCheckpointsArray =
new AbstractCheckpointStats[checkpointsArray.length];
System.arraycopy(
checkpointsArray,
nextPos,
newCheckpointsArray,
0,
checkpointsArray.length - nextPos);
System.arraycopy(
checkpointsArray,
0,
newCheckpointsArray,
checkpointsArray.length - nextPos,
nextPos);
checkpointsHistory = Arrays.asList(newCheckpointsArray);
// reverse the order such that we start with the youngest checkpoint
Collections.reverse(checkpointsHistory);
for (AbstractCheckpointStats checkpoint : checkpointsHistory) {
checkpointsById.put(checkpoint.getCheckpointId(), checkpoint);
}
}
if (latestCompletedCheckpoint != null) {
checkpointsById.put(
latestCompletedCheckpoint.getCheckpointId(), latestCompletedCheckpoint);
}
if (latestFailedCheckpoint != null) {
checkpointsById.put(latestFailedCheckpoint.getCheckpointId(), latestFailedCheckpoint);
}
if (latestSavepoint != null) {
checkpointsById.put(latestSavepoint.getCheckpointId(), latestSavepoint);
}
return new CheckpointStatsHistory(
true,
maxSize,
null,
checkpointsHistory,
checkpointsById,
latestCompletedCheckpoint,
latestFailedCheckpoint,
latestSavepoint);
} | 3.68 |
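The two System.arraycopy calls above unroll a circular buffer: copying from nextPos first puts the oldest entries at the front, after which the list is reversed so the youngest checkpoint comes first. A standalone sketch of just the unrolling step, with illustrative data rather than Flink types:

```java
import java.util.Arrays;

public class RingBufferSnapshot {
    public static void main(String[] args) {
        // Ring buffer whose next write position is 2, so the oldest element is "a".
        String[] ring = {"d", "e", "a", "b", "c"};
        int nextPos = 2;

        String[] ordered = new String[ring.length];
        // Oldest entries (from nextPos to the end) go first...
        System.arraycopy(ring, nextPos, ordered, 0, ring.length - nextPos);
        // ...followed by the newer entries wrapped around at the start.
        System.arraycopy(ring, 0, ordered, ring.length - nextPos, nextPos);

        System.out.println(Arrays.toString(ordered)); // [a, b, c, d, e]
    }
}
```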
morf_AbstractSqlDialectTest_testSelectMaximum | /**
* Tests select statement with maximum function.
*/
@Test
public void testSelectMaximum() {
SelectStatement stmt = new SelectStatement(max(new FieldReference(INT_FIELD))).from(new TableReference(TEST_TABLE));
String expectedSql = "SELECT MAX(intField) FROM " + tableName(TEST_TABLE);
assertEquals("Select scripts are not the same", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_AllWindowedStream_trigger | /** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public AllWindowedStream<T, W> trigger(Trigger<? super T, ? super W> trigger) {
if (windowAssigner instanceof MergingWindowAssigner && !trigger.canMerge()) {
throw new UnsupportedOperationException(
"A merging window assigner cannot be used with a trigger that does not support merging.");
}
this.trigger = trigger;
return this;
} | 3.68 |
flink_HiveTableUtil_createSchema | /** Create a Flink's Schema from Hive table's columns and partition keys. */
public static org.apache.flink.table.api.Schema createSchema(
List<FieldSchema> nonPartCols,
List<FieldSchema> partitionKeys,
Set<String> notNullColumns,
@Nullable UniqueConstraint primaryKey) {
Tuple2<String[], DataType[]> columnInformation =
getColumnInformation(nonPartCols, partitionKeys, notNullColumns, primaryKey);
org.apache.flink.table.api.Schema.Builder builder =
org.apache.flink.table.api.Schema.newBuilder()
.fromFields(columnInformation.f0, columnInformation.f1);
if (primaryKey != null) {
builder.primaryKeyNamed(
primaryKey.getName(), primaryKey.getColumns().toArray(new String[0]));
}
return builder.build();
} | 3.68 |
morf_Function_deepCopyInternal | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected Function deepCopyInternal(DeepCopyTransformation transformer) {
return new Function(Function.this, transformer);
} | 3.68 |
hbase_BlockCache_blockFitsIntoTheCache | /**
* Checks whether there's enough space left in the cache to accommodate the passed block. This
* method may not be overridden by all implementing classes. In such cases, the returned Optional
* will be empty. For subclasses implementing this logic, the returned Optional would contain the
* boolean value reflecting if the passed block fits into the remaining cache space available.
* @param block the block we want to check if fits into the cache.
* @return empty optional if this method is not supported, otherwise the returned optional
* contains the boolean value informing if the block fits into the cache available space.
*/
default Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
return Optional.empty();
} | 3.68 |
framework_BootstrapHandler_getResponse | /**
* Gets the Vaadin/HTTP response.
*
* @return the Vaadin/HTTP response
*/
public VaadinResponse getResponse() {
return response;
} | 3.68 |
hadoop_ClientCache_stopClient | /**
* Stop a RPC client connection
* A RPC client is closed only when its reference count becomes zero.
*
* @param client input client.
*/
public void stopClient(Client client) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("stopping client from cache: " + client);
}
final int count;
synchronized (this) {
count = client.decAndGetCount();
if (count == 0) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("removing client from cache: " + client);
}
clients.remove(client.getSocketFactory());
}
}
if (count == 0) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("stopping actual client because no more references remain: "
+ client);
}
client.stop();
}
} | 3.68 |
flink_CharValue_setValue | /**
* Sets the encapsulated char to the specified value.
*
* @param value the new value of the encapsulated char.
*/
public void setValue(char value) {
this.value = value;
} | 3.68 |
hadoop_SignerSecretProvider_destroy | /**
* Will be called on shutdown; subclasses should perform any cleanup here.
*/
public void destroy() {} | 3.68 |
druid_SQLMethodInvokeExpr_addParameter | /**
     * Deprecated; use addArgument instead.
*
* @deprecated
*/
public void addParameter(SQLExpr param) {
if (param != null) {
param.setParent(this);
}
this.arguments.add(param);
} | 3.68 |
hbase_ZKSplitLogManagerCoordination_setIgnoreDeleteForTesting | /**
* Temporary function that is used by unit tests only
*/
public void setIgnoreDeleteForTesting(boolean b) {
ignoreZKDeleteForTesting = b;
} | 3.68 |