name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68) |
---|---|---|
hadoop_FindOptions_getConfiguration | /**
* Return the {@link Configuration}.
* @return the configuration.
*/
public Configuration getConfiguration() {
return this.configuration;
} | 3.68 |
hadoop_CounterGroupFactory_getFrameworkGroupId | /**
* Get the id of a framework group.
* @param name the name of the group
* @return the framework group id
*/
public static synchronized int getFrameworkGroupId(String name) {
Integer i = s2i.get(name);
if (i == null) throwBadFrameworkGroupNameException(name);
return i;
} | 3.68 |
MagicPlugin_MageDataStore_close | /**
* Close this data store. This should close any open connections and free up any resources that would be
* contentious if this data store was immediately re-created.
*/
default void close() {
} | 3.68 |
flink_TypeSerializerSchemaCompatibility_isCompatibleWithReconfiguredSerializer | /**
* Returns whether or not the type of the compatibility is {@link
* Type#COMPATIBLE_WITH_RECONFIGURED_SERIALIZER}.
*
* @return whether or not the type of the compatibility is {@link
* Type#COMPATIBLE_WITH_RECONFIGURED_SERIALIZER}.
*/
public boolean isCompatibleWithReconfiguredSerializer() {
return resultType == Type.COMPATIBLE_WITH_RECONFIGURED_SERIALIZER;
} | 3.68 |
hudi_HoodieFileGroup_addLogFile | /**
* Add a new log file into the group.
*
* <p>CAUTION: log files must be added in order of their delta commit times.
*/
public void addLogFile(CompletionTimeQueryView completionTimeQueryView, HoodieLogFile logFile) {
String baseInstantTime = getBaseInstantTime(completionTimeQueryView, logFile);
if (!fileSlices.containsKey(baseInstantTime)) {
fileSlices.put(baseInstantTime, new FileSlice(fileGroupId, baseInstantTime));
}
fileSlices.get(baseInstantTime).addLogFile(logFile);
} | 3.68 |
hbase_MemStoreSizing_decMemStoreSize | /** Returns The new dataSize ONLY as a convenience */
default long decMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta,
int cellsCountDelta) {
return incMemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta, -cellsCountDelta);
} | 3.68 |
hbase_AuthManager_accessUserTable | /**
* Checks if the user has access to the full table or at least a family/qualifier for the
* specified action.
* @param user the user to check
* @param table table name
* @param action action in one of [Read, Write, Create, Exec, Admin]
* @return true if the user has access to the table, false otherwise
*/
public boolean accessUserTable(User user, TableName table, Permission.Action action) {
if (user == null) {
return false;
}
if (table == null) {
table = PermissionStorage.ACL_TABLE_NAME;
}
if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) {
return true;
}
PermissionCache<TablePermission> tblPermissions =
tableCache.getOrDefault(table, TBL_NO_PERMISSION);
if (hasAccessTable(tblPermissions.get(user.getShortName()), action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (hasAccessTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), action)) {
return true;
}
}
return false;
} | 3.68 |
hbase_BloomFilterMetrics_incrementRequests | /**
* Increment bloom request count, and negative result count if !passed
*/
public void incrementRequests(boolean passed) {
requests.increment();
if (!passed) {
negativeResults.increment();
}
} | 3.68 |
flink_OperationUtils_formatWithChildren | /**
* Formats a Tree of {@link Operation} in a unified way. It prints all the parameters and adds
* all children formatted and properly indented in the following lines.
*
* <p>The format is
*
* <pre>{@code
* <operationName>: [(key1: [value1], key2: [v1, v2])]
* <child1>
* <child2>
* <child3>
* }</pre>
*
* @param operationName The operation name.
* @param parameters The operation's parameters.
* @param children The operation's children.
* @param childToString The function to convert child to String.
* @param <T> The type of the child.
* @return String representation of the given operation.
*/
public static <T extends Operation> String formatWithChildren(
String operationName,
Map<String, Object> parameters,
List<T> children,
Function<T, String> childToString) {
String description =
parameters.entrySet().stream()
.map(entry -> formatParameter(entry.getKey(), entry.getValue()))
.collect(Collectors.joining(", "));
final StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append(operationName).append(":");
if (!StringUtils.isNullOrWhitespaceOnly(description)) {
stringBuilder.append(" (").append(description).append(")");
}
String childrenDescription =
children.stream()
.map(child -> OperationUtils.indent(childToString.apply(child)))
.collect(Collectors.joining());
return stringBuilder.append(childrenDescription).toString();
} | 3.68 |
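A minimal usage sketch of the formatter above; the operation name and parameters are made up, the use of Operation#asSummaryString as the child-to-String function is an assumption, and the expected output shape is inferred from the Javadoc rather than verified against a particular Flink version.
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.flink.table.operations.Operation;
import org.apache.flink.table.operations.OperationUtils;
public class FormatWithChildrenDemo {
    public static void main(String[] args) {
        // Hypothetical parameters for a "Project"-like operation.
        Map<String, Object> params = new LinkedHashMap<>();
        params.put("fields", Arrays.asList("a", "b"));
        params.put("limit", 10);
        String formatted = OperationUtils.formatWithChildren(
                "Project", params, Collections.<Operation>emptyList(), Operation::asSummaryString);
        // Expected shape (per the Javadoc above): Project: (fields: [a, b], limit: [10])
        System.out.println(formatted);
    }
}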
hadoop_HsCountersPage_preHead | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
setActiveNavColumnForTask();
set(DATATABLES_SELECTOR, "#counters .dt-counters");
set(initSelector(DATATABLES),
"{bJQueryUI:true, sDom:'t', iDisplayLength:-1}");
} | 3.68 |
framework_BasicEvent_getDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.calendar.event.CalendarEvent#getDescription()
*/
@Override
public String getDescription() {
return description;
} | 3.68 |
hadoop_AbstractS3ACommitter_abortPendingUploadsInCleanup | /**
* Abort all pending uploads to the destination directory during
* job cleanup operations.
* Note: this instantiates the thread pool if required.
* @param suppressExceptions should exceptions be suppressed
* @param commitContext commit context
* @throws IOException IO problem
*/
protected void abortPendingUploadsInCleanup(
boolean suppressExceptions,
CommitContext commitContext) throws IOException {
// return early if aborting is disabled.
if (!shouldAbortUploadsInCleanup()) {
LOG.debug("Not cleanup up pending uploads to {} as {} is false ",
getOutputPath(),
FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS);
return;
}
Path dest = getOutputPath();
try (DurationInfo ignored =
new DurationInfo(LOG, "Aborting all pending commits under %s",
dest)) {
CommitOperations ops = getCommitOperations();
List<MultipartUpload> pending;
try {
pending = ops.listPendingUploadsUnderPath(dest);
} catch (IOException e) {
// Swallow any errors given this is best effort
LOG.debug("Failed to list pending uploads under {}", dest, e);
return;
}
if (!pending.isEmpty()) {
LOG.warn("{} pending uploads were found -aborting", pending.size());
LOG.warn("If other tasks/jobs are writing to {},"
+ "this action may cause them to fail", dest);
TaskPool.foreach(pending)
.executeWith(commitContext.getOuterSubmitter())
.suppressExceptions(suppressExceptions)
.run(u -> commitContext.abortMultipartCommit(
u.key(), u.uploadId()));
} else {
LOG.info("No pending uploads were found");
}
}
} | 3.68 |
flink_FlinkRelMdCollation_filter | /** Helper method to determine a {@link org.apache.calcite.rel.core.Filter}'s collation. */
public static List<RelCollation> filter(RelMetadataQuery mq, RelNode input) {
return mq.collations(input);
} | 3.68 |
pulsar_TopicsBase_publishMessages | // Publish message to a topic, can be partitioned or non-partitioned
protected void publishMessages(AsyncResponse asyncResponse, ProducerMessages request, boolean authoritative) {
String topic = topicName.getPartitionedTopicName();
try {
if (pulsar().getBrokerService().getOwningTopics().containsKey(topic)
|| !findOwnerBrokerForTopic(authoritative, asyncResponse)) {
// If we've already done the lookup, or after the lookup this broker owns some of the partitions,
// then proceed to publish the message; otherwise asyncResponse will be completed by the lookup.
addOrGetSchemaForTopic(getSchemaData(request.getKeySchema(), request.getValueSchema()),
request.getSchemaVersion() == -1 ? null : new LongSchemaVersion(request.getSchemaVersion()))
.thenAccept(schemaMeta -> {
// Both schema version and schema data are necessary.
if (schemaMeta.getLeft() != null && schemaMeta.getRight() != null) {
internalPublishMessages(topicName, request, pulsar().getBrokerService()
.getOwningTopics().get(topic).values(), asyncResponse,
AutoConsumeSchema.getSchema(schemaMeta.getLeft().toSchemaInfo()),
schemaMeta.getRight());
} else {
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
"Fail to add or retrieve schema."));
}
}).exceptionally(e -> {
if (log.isDebugEnabled()) {
log.debug("Fail to publish message: " + e.getMessage());
}
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, "Fail to publish message:"
+ e.getMessage()));
return null;
});
}
} catch (Exception e) {
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, "Fail to publish message: "
+ e.getMessage()));
}
} | 3.68 |
flink_Predicates_containAnyFieldsInClassHierarchyThat | /**
* @return A {@link DescribedPredicate} returning true if, and only if, a {@link JavaField}
*     matching the given predicate could be found in the class hierarchy of the {@link JavaClass}.
*/
public static DescribedPredicate<JavaClass> containAnyFieldsInClassHierarchyThat(
DescribedPredicate<? super JavaField> predicate) {
return new ContainAnyFieldsThatPredicate<>("fields", JavaClass::getAllFields, predicate);
} | 3.68 |
hadoop_FederationMembershipStateStoreInputValidator_checkCapability | /**
* Validate if the Capability is present or not.
*
* @param capability the capability of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException if the capability is
* invalid
*/
private static void checkCapability(String capability)
throws FederationStateStoreInvalidInputException {
if (capability == null || capability.isEmpty()) {
String message = "Invalid capability information."
+ " Please try again by specifying valid Capability Information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.68 |
hbase_MasterCoprocessorHost_preTruncateRegionAction | /**
* Invoked just before calling the truncate region procedure
* @param region Region to be truncated
* @param user The user
*/
public void preTruncateRegionAction(final RegionInfo region, User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.preTruncateRegionAction(this, region);
}
});
} | 3.68 |
hbase_HBaseTestingUtility_setupMiniKdc | /**
* Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given
* keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use
* kerby KDC server and utility for using it,
* {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
* less baggage. It came in in HBASE-5291.
*/
public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
Properties conf = MiniKdc.createConf();
conf.put(MiniKdc.DEBUG, true);
MiniKdc kdc = null;
File dir = null;
// There is time lag between selecting a port and trying to bind with it. It's possible that
// another service captures the port in between which'll result in BindException.
boolean bindException;
int numTries = 0;
do {
try {
bindException = false;
dir = new File(getDataTestDir("kdc").toUri().getPath());
kdc = new MiniKdc(conf, dir);
kdc.start();
} catch (BindException e) {
FileUtils.deleteDirectory(dir); // clean directory
numTries++;
if (numTries == 3) {
LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
throw e;
}
LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
bindException = true;
}
} while (bindException);
HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
return kdc;
} | 3.68 |
hbase_LruBlockCache_runEviction | /**
* Multi-threaded call to run the eviction process.
*/
private void runEviction() {
if (evictionThread == null || !evictionThread.isGo()) {
evict();
} else {
evictionThread.evict();
}
} | 3.68 |
dubbo_IOUtils_write | /**
* Copy all characters from the reader to the writer using a buffer of the given size.
*
* @param reader the source reader
* @param writer the destination writer
* @param bufferSize the buffer size in characters
* @return the number of characters copied
* @throws IOException If an I/O error occurs
*/
public static long write(Reader reader, Writer writer, int bufferSize) throws IOException {
int read;
long total = 0;
char[] buf = new char[bufferSize];
while ((read = reader.read(buf)) != -1) {
writer.write(buf, 0, read);
total += read;
}
return total;
} | 3.68 |
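A short usage sketch for the copy loop above, assuming the class is Dubbo's org.apache.dubbo.common.utils.IOUtils; the input text and buffer size are arbitrary.
import java.io.StringReader;
import java.io.StringWriter;
import org.apache.dubbo.common.utils.IOUtils;
public class IOUtilsWriteDemo {
    public static void main(String[] args) throws Exception {
        StringReader reader = new StringReader("hello dubbo");
        StringWriter writer = new StringWriter();
        // A deliberately small buffer so the copy loop runs more than once.
        long copied = IOUtils.write(reader, writer, 4);
        System.out.println(copied + " chars copied: " + writer); // 11 chars copied: hello dubbo
    }
}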
hbase_RecoverableZooKeeper_getMaxMultiSizeLimit | /**
* Returns the maximum size (in bytes) that should be included in any single multi() call. NB:
* This is an approximation, so there may be variance in the msg actually sent over the wire.
* Please be sure to set this appropriately, with respect to your ZK server configuration for
* jute.maxbuffer.
*/
public int getMaxMultiSizeLimit() {
return maxMultiSize;
} | 3.68 |
flink_FutureUtils_forwardAsync | /**
* Forwards the value from the source future to the target future using the provided executor.
*
* @param source future to forward the value from
* @param target future to forward the value to
* @param executor executor to forward the source value to the target future
* @param <T> type of the value
*/
public static <T> void forwardAsync(
CompletableFuture<T> source, CompletableFuture<T> target, Executor executor) {
source.whenCompleteAsync(forwardTo(target), executor);
} | 3.68 |
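A small sketch of forwarding one future's result to another; the package (org.apache.flink.util.concurrent.FutureUtils) is an assumption, as the utility has moved between Flink versions.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.flink.util.concurrent.FutureUtils;
public class ForwardAsyncDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        CompletableFuture<String> source = new CompletableFuture<>();
        CompletableFuture<String> target = new CompletableFuture<>();
        FutureUtils.forwardAsync(source, target, executor);
        source.complete("done");          // completing the source...
        System.out.println(target.get()); // ...completes the target with the same value (prints "done")
        executor.shutdown();
    }
}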
druid_AntsparkOutputVisitor_visit | // add the USING statement
@Override
public boolean visit(AntsparkCreateTableStatement x) {
print0(ucase ? "CREATE " : "create ");
if (x.isExternal()) {
print0(ucase ? "EXTERNAL " : "external ");
}
if (x.isIfNotExists()) {
print0(ucase ? "TABLE IF NOT EXISTS " : "table if not exists ");
} else {
print0(ucase ? "TABLE " : "table ");
}
x.getName().accept(this);
if (x.getLike() != null) {
print0(ucase ? " LIKE " : " like ");
x.getLike().accept(this);
}
final List<SQLTableElement> tableElementList = x.getTableElementList();
int size = tableElementList.size();
if (size > 0) {
print0(" (");
if (this.isPrettyFormat() && x.hasBodyBeforeComment()) {
print(' ');
printlnComment(x.getBodyBeforeCommentsDirect());
}
this.indentCount++;
println();
for (int i = 0; i < size; ++i) {
SQLTableElement element = tableElementList.get(i);
element.accept(this);
if (i != size - 1) {
print(',');
}
if (this.isPrettyFormat() && element.hasAfterComment()) {
print(' ');
printlnComment(element.getAfterCommentsDirect());
}
if (i != size - 1) {
println();
}
}
this.indentCount--;
println();
print(')');
}
if (x.getDatasource() != null) {
println();
print0(ucase ? "USING " : "using ");
print0(x.getDatasource().toString());
}
if (x.getComment() != null) {
println();
print0(ucase ? "COMMENT " : "comment ");
x.getComment().accept(this);
}
int partitionSize = x.getPartitionColumns().size();
if (partitionSize > 0) {
println();
print0(ucase ? "PARTITIONED BY (" : "partitioned by (");
this.indentCount++;
println();
for (int i = 0; i < partitionSize; ++i) {
SQLColumnDefinition column = x.getPartitionColumns().get(i);
column.accept(this);
if (i != partitionSize - 1) {
print(',');
}
if (this.isPrettyFormat() && column.hasAfterComment()) {
print(' ');
printlnComment(column.getAfterCommentsDirect());
}
if (i != partitionSize - 1) {
println();
}
}
this.indentCount--;
println();
print(')');
}
List<SQLSelectOrderByItem> clusteredBy = x.getClusteredBy();
if (clusteredBy.size() > 0) {
println();
print0(ucase ? "CLUSTERED BY (" : "clustered by (");
printAndAccept(clusteredBy, ",");
print(')');
}
List<SQLSelectOrderByItem> sortedBy = x.getSortedBy();
if (sortedBy.size() > 0) {
println();
print0(ucase ? "SORTED BY (" : "sorted by (");
printAndAccept(sortedBy, ", ");
print(')');
}
int buckets = x.getBuckets();
if (buckets > 0) {
println();
print0(ucase ? "INTO " : "into ");
print(buckets);
print0(ucase ? " BUCKETS" : " buckets");
}
SQLExpr storedAs = x.getStoredAs();
if (storedAs != null) {
println();
print0(ucase ? "STORED AS " : "stored as ");
storedAs.accept(this);
}
SQLSelect select = x.getSelect();
if (select != null) {
println();
print0(ucase ? "AS" : "as");
println();
select.accept(this);
}
Map<String, SQLObject> serdeProperties = x.getSerdeProperties();
if (serdeProperties.size() > 0) {
println();
print0(ucase ? "TBLPROPERTIES (" : "tblproperties (");
String seperator = "";
for (Entry<String, SQLObject> entry : serdeProperties.entrySet()) {
print0("'" + entry.getKey() + "'='");
entry.getValue().accept(this);
print0("'" + seperator);
seperator = ",";
}
print(')');
}
SQLExpr location = x.getLocation();
if (location != null) {
println();
print0(ucase ? "LOCATION " : "location ");
location.accept(this);
}
return false;
} | 3.68 |
streampipes_JdbcClient_connect | /**
* Connects to the SQL database and initializes {@link JdbcClient#connection}
*
* @throws SpRuntimeException When the connection could not be established (because of a
* wrong identification, missing database etc.)
*/
private void connect(String host, int port, String databaseName) throws SpRuntimeException {
String url = "jdbc:" + this.dbDescription.getEngine().getUrlName() + "://" + host + ":" + port + "/";
try {
connection = DriverManager.getConnection(
url, this.dbDescription.getUsername(),
this.dbDescription.getPassword());
ensureDatabaseExists(databaseName);
ensureTableExists(url, databaseName);
} catch (SQLException e) {
throw new SpRuntimeException("Could not establish a connection with the server: " + e.getMessage());
}
} | 3.68 |
hudi_Option_fromJavaOptional | /**
* Convert from java.util.Optional.
*
* @param v java.util.Optional object
* @param <T> type of the value stored in java.util.Optional object
* @return Option
*/
public static <T> Option<T> fromJavaOptional(Optional<T> v) {
return Option.ofNullable(v.orElse(null));
} | 3.68 |
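A quick usage sketch of the conversion above, assuming Hudi's org.apache.hudi.common.util.Option with its usual isPresent accessor.
import java.util.Optional;
import org.apache.hudi.common.util.Option;
public class OptionConversionDemo {
    public static void main(String[] args) {
        Option<String> present = Option.fromJavaOptional(Optional.of("value"));
        Option<String> absent = Option.fromJavaOptional(Optional.empty());
        System.out.println(present.isPresent()); // true
        System.out.println(absent.isPresent());  // false
    }
}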
hadoop_EntityGroupFSTimelineStoreMetrics_addActiveLogDirScanTime | // Log scanner and cleaner related
public void addActiveLogDirScanTime(long msec) {
activeLogDirScan.add(msec);
} | 3.68 |
zxing_GenericGF_inverse | /**
* @return multiplicative inverse of a
*/
int inverse(int a) {
if (a == 0) {
throw new ArithmeticException();
}
return expTable[size - logTable[a] - 1];
} | 3.68 |
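Why the table lookup yields the inverse (a sketch, assuming the usual zxing setup where size = 2^m and expTable/logTable are built over a generator $\alpha$ with $\alpha^{\mathrm{size}-1} = 1$): $a^{-1} = \alpha^{(\mathrm{size}-1)-\log_\alpha a}$, because $a \cdot \alpha^{(\mathrm{size}-1)-\log_\alpha a} = \alpha^{\mathrm{size}-1} = 1$; the index size - logTable[a] - 1 in the code is exactly $(\mathrm{size}-1)-\log_\alpha a$.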
framework_MultiSelectionModelImpl_selectionContainsId | /**
* Returns if the given id belongs to one of the selected items.
*
* @param id
* the id to check for
* @return {@code true} if id is selected, {@code false} if not
*/
protected boolean selectionContainsId(Object id) {
DataProvider<T, ?> dataProvider = getGrid().getDataProvider();
return selection.stream().map(dataProvider::getId)
.anyMatch(i -> id.equals(i));
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_cron | /**
* Cron expression.
*
* @param cron cron expression
* @return job configuration builder
*/
public Builder cron(final String cron) {
if (null != cron) {
this.cron = cron;
}
return this;
} | 3.68 |
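A usage sketch of the builder, assuming ElasticJob 3.x's JobConfiguration.newBuilder(jobName, shardingTotalCount); the job name, sharding count, and cron value are made up.
import org.apache.shardingsphere.elasticjob.api.JobConfiguration;
public class JobConfigurationDemo {
    public static void main(String[] args) {
        JobConfiguration jobConfig = JobConfiguration.newBuilder("demoJob", 3)
                .cron("0/30 * * * * ?") // run every 30 seconds
                .build();
        System.out.println(jobConfig.getCron()); // 0/30 * * * * ?
    }
}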
framework_Button_setClickShortcut | /**
* Makes it possible to invoke a click on this button by pressing the given
* {@link KeyCode} and (optional) {@link ModifierKey}s.<br/>
* The shortcut is global (bound to the containing Window).
*
* @param keyCode
* the keycode for invoking the shortcut
* @param modifiers
* the (optional) modifiers for invoking the shortcut, null for
* none
*/
public void setClickShortcut(int keyCode, int... modifiers) {
if (clickShortcut != null) {
removeShortcutListener(clickShortcut);
}
clickShortcut = new ClickShortcut(this, keyCode, modifiers);
addShortcutListener(clickShortcut);
getState().clickShortcutKeyCode = clickShortcut.getKeyCode();
} | 3.68 |
flink_Path_hasWindowsDrive | /**
* Checks if the provided path string contains a windows drive letter.
*
* @param path the path to check
* @param slashed true to indicate the first character of the string is a slash, false otherwise
* @return <code>true</code> if the path string contains a windows drive letter, false otherwise
*/
private boolean hasWindowsDrive(String path, boolean slashed) {
final int start = slashed ? 1 : 0;
return path.length() >= start + 2
&& (!slashed || path.charAt(0) == '/')
&& path.charAt(start + 1) == ':'
&& ((path.charAt(start) >= 'A' && path.charAt(start) <= 'Z')
|| (path.charAt(start) >= 'a' && path.charAt(start) <= 'z'));
} | 3.68 |
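Since the method above is private, here is a minimal, self-contained mirror of the same check, used only to illustrate the expected results; the sample paths are made up.
public class WindowsDrivePathDemo {
    // Same logic as flink_Path#hasWindowsDrive, copied here because the original is private.
    static boolean hasWindowsDrive(String path, boolean slashed) {
        final int start = slashed ? 1 : 0;
        return path.length() >= start + 2
                && (!slashed || path.charAt(0) == '/')
                && path.charAt(start + 1) == ':'
                && ((path.charAt(start) >= 'A' && path.charAt(start) <= 'Z')
                        || (path.charAt(start) >= 'a' && path.charAt(start) <= 'z'));
    }
    public static void main(String[] args) {
        System.out.println(hasWindowsDrive("C:/flink/data", false)); // true: drive letter at the start
        System.out.println(hasWindowsDrive("/C:/flink/data", true)); // true: drive letter after a leading slash
        System.out.println(hasWindowsDrive("/flink/data", true));    // false: no drive letter
    }
}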
hbase_BackupManager_startBackupSession | /**
* Starts a new backup session.
* @throws IOException if an active session already exists
*/
public void startBackupSession() throws IOException {
long startTime = EnvironmentEdgeManager.currentTime();
long timeout = conf.getInt(BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY,
DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT) * 1000L;
long lastWarningOutputTime = 0;
while (EnvironmentEdgeManager.currentTime() - startTime < timeout) {
try {
systemTable.startBackupExclusiveOperation();
return;
} catch (IOException e) {
if (e instanceof ExclusiveOperationException) {
// sleep, then repeat
try {
Thread.sleep(1000);
} catch (InterruptedException e1) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
if (
lastWarningOutputTime == 0
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000
) {
lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
LOG.warn("Waiting to acquire backup exclusive lock for {}s",
+(lastWarningOutputTime - startTime) / 1000);
}
} else {
throw e;
}
}
}
throw new IOException(
"Failed to acquire backup system table exclusive lock after " + timeout / 1000 + "s");
} | 3.68 |
hbase_AccessControlClient_hasPermission | /**
* Validates whether specified user has permission to perform actions on the mentioned table,
* column family or column qualifier.
* @param connection Connection
* @param tableName Table name, it shouldn't be null or empty.
* @param columnFamily The column family. Optional argument, can be empty. If empty then
* validation will happen at table level.
* @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then
* validation will happen at table and column family level. columnQualifier
* will not be considered if columnFamily is passed as null or empty.
* @param userName User name, it shouldn't be null or empty.
* @param actions Actions
* @return true if access allowed to the specified user, otherwise false.
* @throws Throwable on failure
*/
public static boolean hasPermission(Connection connection, String tableName, byte[] columnFamily,
byte[] columnQualifier, String userName, Permission.Action... actions) throws Throwable {
if (StringUtils.isEmpty(tableName) || StringUtils.isEmpty(userName)) {
throw new IllegalArgumentException("Table and user name can't be null or empty.");
}
List<Permission> permissions = new ArrayList<>(1);
permissions.add(Permission.newBuilder(TableName.valueOf(tableName)).withFamily(columnFamily)
.withQualifier(columnQualifier).withActions(actions).build());
return connection.getAdmin().hasUserPermissions(userName, permissions).get(0);
} | 3.68 |
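A hedged usage sketch of the method above; the cluster configuration, table, column family, and user names are hypothetical placeholders, and a reachable HBase cluster is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;
public class HasPermissionDemo {
    public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            // Table, family, and user are placeholders for illustration only.
            boolean canRead = AccessControlClient.hasPermission(connection, "my_table",
                    Bytes.toBytes("cf"), null, "alice", Permission.Action.READ);
            System.out.println("alice can READ my_table:cf -> " + canRead);
        }
    }
}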
hbase_EnableTableProcedure_prepareEnable | /**
* Action taken before any real work of enabling the table. Sets the exception in the procedure
* instead of throwing it, to remain backward compatible with 1.0.
* @param env MasterProcedureEnv
* @return whether the table passes the necessary checks
*/
private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeEnabled = true;
// Check whether table exists
if (!env.getMasterServices().getTableDescriptors().exists(tableName)) {
setFailure("master-enable-table", new TableNotFoundException(tableName));
canTableBeEnabled = false;
} else {
// There could be multiple client requests trying to disable or enable
// the table at the same time. Ensure only the first request is honored
// After that, no other requests can be accepted until the table reaches
// DISABLED or ENABLED.
//
// Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
// the state to ENABLING from DISABLED. The implementation was done before table lock
// was implemented. With table lock, there is no need to set the state here (it will
// set the state later on). A quick state check should be enough for us to move forward.
TableStateManager tsm = env.getMasterServices().getTableStateManager();
TableState ts = tsm.getTableState(tableName);
if (!ts.isDisabled()) {
LOG.info("Not DISABLED tableState={}; skipping enable; {}", ts.getState(), this);
setFailure("master-enable-table", new TableNotDisabledException(ts.toString()));
canTableBeEnabled = false;
}
}
// We are done the check. Future actions in this procedure could be done asynchronously.
releaseSyncLatch();
return canTableBeEnabled;
} | 3.68 |
hbase_RegionPlan_getRegionName | /**
* Get the encoded region name for the region this plan is for.
* @return Encoded region name
*/
public String getRegionName() {
return this.hri.getEncodedName();
} | 3.68 |
framework_Window_removeWindowModeChangeListener | /**
* Removes the WindowModeChangeListener from the window.
*
* @param listener
* the WindowModeChangeListener to remove.
*/
@Deprecated
public void removeWindowModeChangeListener(
WindowModeChangeListener listener) {
removeListener(WindowModeChangeEvent.class, listener,
WindowModeChangeListener.windowModeChangeMethod);
} | 3.68 |
framework_QueryBuilder_getWhereStringForFilter | /**
* Constructs and returns a string representing the filter that can be used
* in a WHERE clause.
*
* @param filter
* the filter to translate
* @param sh
* the statement helper to update with the value(s) of the filter
* @return a string representing the filter.
*/
public static synchronized String getWhereStringForFilter(Filter filter,
StatementHelper sh) {
for (FilterTranslator ft : filterTranslators) {
if (ft.translatesFilter(filter)) {
return ft.getWhereStringForFilter(filter, sh);
}
}
return "";
} | 3.68 |
flink_CatalogManager_dropTemporaryView | /**
* Drop a temporary view in a given fully qualified path.
*
* @param objectIdentifier The fully qualified path of the view to drop.
* @param ignoreIfNotExists If false exception will be thrown if the view to be dropped does not
* exist.
*/
public void dropTemporaryView(ObjectIdentifier objectIdentifier, boolean ignoreIfNotExists) {
dropTemporaryTableInternal(
objectIdentifier,
(table) -> table instanceof CatalogView,
ignoreIfNotExists,
false);
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_configure | /**
* Creates a copy of this state backend that uses the values defined in the configuration for
* fields that were not yet specified in this state backend.
*
* @param config The configuration.
* @param classLoader The class loader.
* @return The re-configured variant of the state backend
*/
@Override
public EmbeddedRocksDBStateBackend configure(ReadableConfig config, ClassLoader classLoader) {
return new EmbeddedRocksDBStateBackend(this, config, classLoader);
} | 3.68 |
framework_DownloadStream_setFileName | /**
* Sets the file name.
*
* @param fileName
* the file name to set.
*/
public void setFileName(String fileName) {
this.fileName = fileName;
} | 3.68 |
flink_JoinInputSideSpec_joinKeyContainsUniqueKey | /** Returns true if the join key contains the unique key of the input. */
public boolean joinKeyContainsUniqueKey() {
return joinKeyContainsUniqueKey;
} | 3.68 |
flink_HiveParserASTNode_getOrigin | /**
* @return information about the object from which this HiveParserASTNode originated, or null if
* this HiveParserASTNode was not expanded from an object reference
*/
public HiveParserASTNodeOrigin getOrigin() {
return origin;
} | 3.68 |
framework_AbstractConnector_getConnectorId | /*
* (non-Javadoc)
*
* @see com.vaadin.client.Connector#getId()
*/
@Override
public String getConnectorId() {
return id;
} | 3.68 |
hmily_MongodbXaRepository_convert | /**
* Convert hmily xa recovery.
*
* @param entity the entity
* @return the hmily xa recovery
*/
private HmilyXaRecovery convert(final XaRecoveryMongoEntity entity) {
return HmilyXaRecoveryImpl.convert(entity);
} | 3.68 |
hbase_DeadServer_getTimeOfDeath | /**
* Get the time when a server died
* @param deadServerName the dead server name
* @return the date when the server died
*/
public synchronized Date getTimeOfDeath(final ServerName deadServerName) {
Long time = deadServers.get(deadServerName);
return time == null ? null : new Date(time);
} | 3.68 |
AreaShop_ImportJob_message | /**
* Send a message to a target, prefixed by the default chat prefix.
* @param key The key of the language string
* @param replacements The replacements to insert in the message
*/
public void message(String key, Object... replacements) {
plugin.message(sender, key, replacements);
if(!(sender instanceof ConsoleCommandSender)) {
plugin.message(Bukkit.getConsoleSender(), key, replacements);
}
} | 3.68 |
hbase_HRegion_setReadOnly | /**
* Set flags that make this region read-only.
* @param onOff flip value for region r/o setting
*/
synchronized void setReadOnly(final boolean onOff) {
this.writesEnabled = !onOff;
this.readOnly = onOff;
} | 3.68 |
flink_CopyOnWriteStateMap_snapshotMapArrays | /**
* Creates (combined) copy of the table arrays for a snapshot. This method must be called by the
* same Thread that does modifications to the {@link CopyOnWriteStateMap}.
*/
@VisibleForTesting
@SuppressWarnings("unchecked")
StateMapEntry<K, N, S>[] snapshotMapArrays() {
// we guard against concurrent modifications of highestRequiredSnapshotVersion between
// snapshot and release.
// Only stale reads from the result of #releaseSnapshot calls are ok. This is why we must
// call this method from the same thread that does all the modifications to the map.
synchronized (snapshotVersions) {
// increase the map version for copy-on-write and register the snapshot
if (++stateMapVersion < 0) {
// this is just a safety net against overflows, but should never happen in practice
// (i.e., only after 2^31 snapshots)
throw new IllegalStateException(
"Version count overflow in CopyOnWriteStateMap. Enforcing restart.");
}
highestRequiredSnapshotVersion = stateMapVersion;
snapshotVersions.add(highestRequiredSnapshotVersion);
}
StateMapEntry<K, N, S>[] table = primaryTable;
// In order to reuse the copied array as the destination array for the partitioned records in
// CopyOnWriteStateMapSnapshot.TransformedSnapshotIterator, we need to make sure that the
// copied array is big enough to hold the flattened entries. In fact, given the current
// rehashing algorithm, we only need to do this check when isRehashing() is false, but to make
// the code more robust (in case the rehashing algorithm changes in the future), we do this
// check in all cases.
final int totalMapIndexSize = rehashIndex + table.length;
final int copiedArraySize = Math.max(totalMapIndexSize, size());
final StateMapEntry<K, N, S>[] copy = new StateMapEntry[copiedArraySize];
if (isRehashing()) {
// consider both maps for the snapshot, the rehash index tells us which part of the two
// maps we need
final int localRehashIndex = rehashIndex;
final int localCopyLength = table.length - localRehashIndex;
// for the primary table, take every index >= rhIdx.
System.arraycopy(table, localRehashIndex, copy, 0, localCopyLength);
// for the new table, we are sure that two regions contain all the entries:
// [0, rhIdx[ AND [table.length / 2, table.length / 2 + rhIdx[
table = incrementalRehashTable;
System.arraycopy(table, 0, copy, localCopyLength, localRehashIndex);
System.arraycopy(
table,
table.length >>> 1,
copy,
localCopyLength + localRehashIndex,
localRehashIndex);
} else {
// we only need to copy the primary table
System.arraycopy(table, 0, copy, 0, table.length);
}
return copy;
} | 3.68 |
framework_ClickEventHandler_fireClick | /**
* Sends the click event based on the given native event. Delegates actual
* sending to {@link #fireClick(MouseEventDetails)}.
*
* @param event
* The native event that caused this click event
*/
@Override
protected void fireClick(NativeEvent event) {
MouseEventDetails mouseDetails = MouseEventDetailsBuilder
.buildMouseEventDetails(event, getRelativeToElement());
fireClick(event, mouseDetails);
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateMethodArguments | /**
* generate method arguments
*/
private String generateMethodArguments(Method method) {
Class<?>[] pts = method.getParameterTypes();
return IntStream.range(0, pts.length)
.mapToObj(i -> String.format(CODE_METHOD_ARGUMENT, pts[i].getCanonicalName(), i))
.collect(Collectors.joining(", "));
} | 3.68 |
morf_XmlDataSetConsumer_createContentHandler | /**
* @param outputStream The output
* @return A content handler
* @throws IOException When there's an XML error
*/
private ContentHandler createContentHandler(OutputStream outputStream) throws IOException {
Properties outputProperties = OutputPropertiesFactory.getDefaultMethodProperties(Method.XML);
outputProperties.setProperty("indent", "yes");
outputProperties.setProperty(OutputPropertiesFactory.S_KEY_INDENT_AMOUNT, "2");
outputProperties.setProperty(OutputPropertiesFactory.S_KEY_LINE_SEPARATOR, "\n");
Serializer serializer = SerializerFactory.getSerializer(outputProperties);
serializer.setOutputStream(outputStream);
return serializer.asContentHandler();
} | 3.68 |
framework_Navigator_beforeViewChange | /**
* Check whether view change is allowed by view change listeners (
* {@link ViewChangeListener#beforeViewChange(ViewChangeEvent)}).
*
* This method can be overridden to extend the behavior, and should not be
* called directly except by {@link #navigateTo(View, String, String)}.
*
* @since 7.6
* @param event
* the event to fire as the before view change event
* @return true if view change is allowed
*/
protected boolean beforeViewChange(ViewChangeEvent event) {
return fireBeforeViewChange(event);
} | 3.68 |
dubbo_ClassHelper_isGetter | /**
* @see org.apache.dubbo.common.utils.MethodUtils#isGetter(Method)
* @deprecated Replaced by <code>MethodUtils#isGetter(Method)</code>
*/
public static boolean isGetter(Method method) {
return MethodUtils.isGetter(method);
} | 3.68 |
querydsl_JPAListAccessVisitor_shorten | /**
* Shorten the parent path to a length of max 2 elements
*/
private Path<?> shorten(Path<?> path, boolean outer) {
if (aliases.containsKey(path)) {
return aliases.get(path);
} else if (path.getMetadata().isRoot()) {
return path;
} else if (path.getMetadata().getParent().getMetadata().isRoot() && outer) {
return path;
} else {
Class<?> type = JPAQueryMixin.getElementTypeOrType(path);
Path<?> parent = shorten(path.getMetadata().getParent(), false);
Path oldPath = ExpressionUtils.path(path.getType(),
new PathMetadata(parent, path.getMetadata().getElement(), path.getMetadata().getPathType()));
if (oldPath.getMetadata().getParent().getMetadata().isRoot() && outer) {
return oldPath;
} else {
Path newPath = ExpressionUtils.path(type, ExpressionUtils.createRootVariable(oldPath));
aliases.put(path, newPath);
metadata.addJoin(JoinType.LEFTJOIN, ExpressionUtils.as(oldPath, newPath));
return newPath;
}
}
} | 3.68 |
hbase_MetaRegionLocationCache_getMetaRegionLocations | /** Returns the list of HRegionLocations for the meta replica(s); an empty list if the cache is empty. */
public List<HRegionLocation> getMetaRegionLocations() {
ConcurrentNavigableMap<Integer, HRegionLocation> snapshot =
cachedMetaLocations.tailMap(cachedMetaLocations.firstKey());
if (snapshot.isEmpty()) {
// This could be possible if the master has not successfully initialized yet or meta region
// is stuck in some weird state.
return Collections.emptyList();
}
List<HRegionLocation> result = new ArrayList<>();
// Explicitly iterate instead of new ArrayList<>(snapshot.values()) because the underlying
// ArrayValueCollection does not implement toArray().
snapshot.values().forEach(location -> result.add(location));
return result;
} | 3.68 |
hbase_TableSplit_getLocations | /**
* Returns the region's location as an array.
* @return The array containing the region location.
* @see org.apache.hadoop.mapreduce.InputSplit#getLocations()
*/
@Override
public String[] getLocations() {
return new String[] { regionLocation };
} | 3.68 |
hadoop_TimelineEntityReaderFactory_createMultipleEntitiesReader | /**
* Creates a timeline entity reader instance for reading set of entities with
* the specified input and predicates.
*
* @param context Reader context which defines the scope in which query has to
* be made.
* @param filters Filters which limit the entities returned.
* @param dataToRetrieve Data to retrieve for each entity.
* @return An implementation of <cite>TimelineEntityReader</cite> object
* depending on entity type.
*/
public static TimelineEntityReader createMultipleEntitiesReader(
TimelineReaderContext context, TimelineEntityFilters filters,
TimelineDataToRetrieve dataToRetrieve) {
// currently the types that are handled separate from the generic entity
// table are application, flow run, and flow activity entities
if (!context.isGenericEntity()) {
if (TimelineEntityType.
YARN_APPLICATION.matches(context.getEntityType())) {
return new ApplicationEntityReader(context, filters, dataToRetrieve);
} else if (TimelineEntityType.
YARN_FLOW_ACTIVITY.matches(context.getEntityType())) {
return new FlowActivityEntityReader(context, filters, dataToRetrieve);
} else if (TimelineEntityType.
YARN_FLOW_RUN.matches(context.getEntityType())) {
return new FlowRunEntityReader(context, filters, dataToRetrieve);
}
}
if (context.getDoAsUser() != null) {
return new SubApplicationEntityReader(context, filters, dataToRetrieve);
}
// assume we're dealing with a generic entity read
return new GenericEntityReader(context, filters, dataToRetrieve);
} | 3.68 |
hbase_HFileSystem_useHBaseChecksum | /**
* Are we verifying checksums in HBase?
* @return True, if hbase is configured to verify checksums, otherwise false.
*/
public boolean useHBaseChecksum() {
return useHBaseChecksum;
} | 3.68 |
flink_WindowAssigner_getDefaultTrigger | /**
* Returns the default trigger associated with this {@code WindowAssigner}.
*
* <p>1. If you override {@code getDefaultTrigger()}, the {@code getDefaultTrigger()} will be
* invoked and the {@code getDefaultTrigger(StreamExecutionEnvironment env)} won't be invoked.
* 2. If you don't override {@code getDefaultTrigger()}, the {@code
* getDefaultTrigger(StreamExecutionEnvironment env)} will be invoked in the default
* implementation of the {@code getDefaultTrigger()}.
*/
public Trigger<T, W> getDefaultTrigger() {
return getDefaultTrigger(new StreamExecutionEnvironment());
} | 3.68 |
AreaShop_RegionGroup_saveRequired | /**
* Indicates this file needs to be saved, will actually get saved later by a task.
*/
public void saveRequired() {
plugin.getFileManager().saveGroupsIsRequired();
} | 3.68 |
flink_DynamicSourceUtils_pushWatermarkAssigner | /** Creates a specialized node for assigning watermarks. */
private static void pushWatermarkAssigner(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
final ExpressionConverter converter = new ExpressionConverter(relBuilder);
final RelDataType inputRelDataType = relBuilder.peek().getRowType();
// schema resolver has checked before that only one spec exists
final WatermarkSpec watermarkSpec = schema.getWatermarkSpecs().get(0);
final String rowtimeColumn = watermarkSpec.getRowtimeAttribute();
final int rowtimeColumnIdx = inputRelDataType.getFieldNames().indexOf(rowtimeColumn);
final RexNode watermarkRexNode = watermarkSpec.getWatermarkExpression().accept(converter);
relBuilder.watermark(rowtimeColumnIdx, watermarkRexNode);
} | 3.68 |
flink_Channel_getSerializer | /**
* Gets the serializer from this Channel.
*
* @return The serializer.
*/
public TypeSerializerFactory<?> getSerializer() {
return serializer;
} | 3.68 |
hbase_RefCountingMap_remove | /**
* Decrements the ref count of k, and removes from map if ref count == 0.
* @param k the key to remove
* @return the value associated with the specified key or null if key is removed from map.
*/
V remove(K k) {
Payload<V> p = map.computeIfPresent(k, (k1, v) -> --v.refCount <= 0 ? null : v);
return p == null ? null : p.v;
} | 3.68 |
querydsl_Alias_$ | /**
* Convert the given alias to an expression
*
* @param arg alias
* @param <D>
* @return expression
*/
@SuppressWarnings("unchecked")
@Nullable
public static <D> EntityPathBase<D> $(D arg) {
final Object current = aliasFactory.getCurrentAndReset();
if (arg instanceof EntityPath<?>) {
return (EntityPathBase<D>) arg; //NOSONAR
} else if (arg instanceof ManagedObject) {
return (EntityPathBase<D>) ((ManagedObject) arg).__mappedPath();
} else {
return (EntityPathBase<D>) current;
}
} | 3.68 |
hbase_CellUtil_copyFamilyTo | /**
* Copies the family to the given bytebuffer
* @param cell the cell whose family has to be copied
* @param destination the destination bytebuffer to which the family has to be copied
* @param destinationOffset the offset in the destination bytebuffer
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyFamilyTo(Cell cell, ByteBuffer destination, int destinationOffset) {
byte fLen = cell.getFamilyLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
destination, ((ByteBufferExtendedCell) cell).getFamilyPosition(), destinationOffset, fLen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getFamilyArray(),
cell.getFamilyOffset(), fLen);
}
return destinationOffset + fLen;
} | 3.68 |
flink_BigIntComparator_putNormalizedKey | /**
* Adds a normalized key containing the normalized number of bits and MSBs of the given record.
* 1 bit determines the sign (negative, zero/positive), 31 bit the bit length of the record.
* Remaining bytes contain the most significant bits of the record.
*/
@Override
public void putNormalizedKey(BigInteger record, MemorySegment target, int offset, int len) {
// add normalized bit length (the larger the length, the larger the value)
int bitLen = 0;
if (len > 0) {
final int signum = record.signum();
bitLen = record.bitLength();
// normalize dependent on sign
// from 0 to Integer.MAX
// OR from Integer.MAX to 0
int normBitLen = signum < 0 ? Integer.MAX_VALUE - bitLen : bitLen;
// add sign
if (signum >= 0) {
normBitLen |= (1 << 31);
}
for (int i = 0; i < 4 && len > 0; i++, len--) {
final byte b = (byte) (normBitLen >>> (8 * (3 - i)));
target.put(offset++, b);
}
}
// fill remaining bytes with most significant bits
int bitPos = bitLen - 1;
for (; len > 0; len--) {
byte b = 0;
for (int bytePos = 0; bytePos < 8 && bitPos >= 0; bytePos++, bitPos--) {
b <<= 1;
if (record.testBit(bitPos)) {
b |= 1;
}
}
// the last byte might be partially filled, but that's ok within an equal bit length.
// no need for padding bits.
target.put(offset++, b);
}
} | 3.68 |
hbase_MonitoredRPCHandlerImpl_getRPC | /**
* Produces a string representation of the method currently being serviced by this Handler.
* @param withParams toggle inclusion of parameters in the RPC String
* @return A human-readable string representation of the method call.
*/
@Override
public synchronized String getRPC(boolean withParams) {
if (getState() != State.RUNNING) {
// no RPC is currently running
return "";
}
StringBuilder buffer = new StringBuilder(256);
buffer.append(methodName);
if (withParams) {
buffer.append("(");
for (int i = 0; i < params.length; i++) {
if (i != 0) buffer.append(", ");
buffer.append(params[i]);
}
buffer.append(")");
}
return buffer.toString();
} | 3.68 |
hbase_IndividualBytesFieldCell_getFamilyArray | // 2) Family
@Override
public byte[] getFamilyArray() {
// Family could be null
return (family == null) ? HConstants.EMPTY_BYTE_ARRAY : family;
} | 3.68 |
morf_AbstractSqlDialectTest_testCastFunctionToBigInt | /**
* Tests the output of a cast of a function to a big int.
*/
@Test
public void testCastFunctionToBigInt() {
String result = testDialect.getSqlFrom(new Cast(min(field("value")), DataType.BIG_INTEGER, 10));
assertEquals(expectedBigIntFunctionCast(), result);
} | 3.68 |
morf_DataSetProducerBuilderImpl_table | /**
* @see org.alfasoftware.morf.metadata.DataSetUtils.DataSetProducerBuilder#table(java.lang.String, java.util.List)
*/
@Override
public DataSetProducerBuilder table(String tableName, Record... records) {
table(tableName, Arrays.asList(records));
return this;
} | 3.68 |
flink_JobVertex_getSlotSharingGroup | /**
* Gets the slot sharing group that this vertex is associated with. Different vertices in the
* same slot sharing group can run one subtask each in the same slot.
*
* @return The slot sharing group to associate the vertex with
*/
public SlotSharingGroup getSlotSharingGroup() {
if (slotSharingGroup == null) {
// create a new slot sharing group for this vertex if it was in no other slot sharing group.
// This should only happen in testing cases at the moment, because the production code path
// will always set a value before it is used.
setSlotSharingGroup(new SlotSharingGroup());
}
return slotSharingGroup;
} | 3.68 |
hbase_WALPrettyPrinter_setOutputOnlyRowKey | /**
* Option to print the row key only in case you just need the row keys from the WAL
*/
public void setOutputOnlyRowKey() {
this.outputOnlyRowKey = true;
} | 3.68 |
rocketmq-connect_ConnectorPluginsResource_reloadPlugins | /**
* reload plugins
*
* @param context
*/
public void reloadPlugins(Context context) {
try {
connectController.reloadPlugins();
context.json(new HttpResponse<>(context.status(), "Plugin reload succeeded"));
} catch (Exception ex) {
log.error("Reload plugin failed .", ex);
context.json(new ErrorMessage(HttpStatus.INTERNAL_SERVER_ERROR_500, ex.getMessage()));
}
} | 3.68 |
rocketmq-connect_RocketMqAdminUtil_offsets | /**
* Get topic offsets
*
* @param config
* @param topic
* @return
*/
public static Map<MessageQueue, TopicOffset> offsets(RocketMqConfig config, String topic) {
// Get db schema topic min and max offset
DefaultMQAdminExt adminClient = null;
try {
adminClient = RocketMqAdminUtil.startMQAdminTool(config);
TopicStatsTable topicStatsTable = adminClient.examineTopicStats(topic);
return topicStatsTable.getOffsetTable();
} catch (MQClientException | MQBrokerException | RemotingException | InterruptedException e) {
throw new RuntimeException(e);
} finally {
if (adminClient != null) {
adminClient.shutdown();
}
}
} | 3.68 |
flink_TypeSerializerSnapshot_writeVersionedSnapshot | /**
* Writes the given snapshot to the out stream. One should always use this method to write
* snapshots out, rather than directly calling {@link #writeSnapshot(DataOutputView)}.
*
* <p>The snapshot written with this method can be read via {@link
* #readVersionedSnapshot(DataInputView, ClassLoader)}.
*/
static void writeVersionedSnapshot(DataOutputView out, TypeSerializerSnapshot<?> snapshot)
throws IOException {
out.writeUTF(snapshot.getClass().getName());
out.writeInt(snapshot.getCurrentVersion());
snapshot.writeSnapshot(out);
} | 3.68 |
framework_VFlash_setSlotHeightAndWidth | /**
* Set dimensions of the containing layout slot so that the size of the
* embed object can be calculated from percentages if needed.
*
* Triggers embed resizing if percentage sizes are in use.
*
* @since 7.7.8
* @param slotOffsetHeight
* offset height of the layout slot
* @param slotOffsetWidth
* offset width of the layout slot
*/
public void setSlotHeightAndWidth(int slotOffsetHeight,
int slotOffsetWidth) {
this.slotOffsetHeight = slotOffsetHeight;
this.slotOffsetWidth = slotOffsetWidth;
if (hasPercentageHeight() || hasPercentageWidth()) {
resizeEmbedElement();
}
} | 3.68 |
flink_TextElement_code | /**
* Creates a block of text formatted as code.
*
* @param text a block of text that will be formatted as code
* @return block of text formatted as code
*/
public static TextElement code(String text) {
TextElement element = text(text);
element.textStyles.add(TextStyle.CODE);
return element;
} | 3.68 |
hudi_HiveSchemaUtil_convertField | /**
* Convert one field data type of parquet schema into an equivalent Hive schema.
*
* @param parquetType : Single parquet field
* @return : Equivalent Hive schema
*/
private static String convertField(final Type parquetType, boolean supportTimestamp, boolean doFormat) {
StringBuilder field = new StringBuilder();
if (parquetType.isPrimitive()) {
final PrimitiveType.PrimitiveTypeName parquetPrimitiveTypeName =
parquetType.asPrimitiveType().getPrimitiveTypeName();
final OriginalType originalType = parquetType.getOriginalType();
if (originalType == OriginalType.DECIMAL) {
final DecimalMetadata decimalMetadata = parquetType.asPrimitiveType().getDecimalMetadata();
return field.append("DECIMAL(").append(decimalMetadata.getPrecision()).append(doFormat ? " , " : ",")
.append(decimalMetadata.getScale()).append(")").toString();
} else if (originalType == OriginalType.DATE) {
return field.append("DATE").toString();
} else if (supportTimestamp && (originalType == OriginalType.TIMESTAMP_MICROS || originalType == OriginalType.TIMESTAMP_MILLIS)) {
return field.append("TIMESTAMP").toString();
}
// TODO - fix the method naming here
return parquetPrimitiveTypeName.convert(new PrimitiveType.PrimitiveTypeNameConverter<String, RuntimeException>() {
@Override
public String convertBOOLEAN(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return BOOLEAN_TYPE_NAME;
}
@Override
public String convertINT32(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return INT_TYPE_NAME;
}
@Override
public String convertINT64(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return BIGINT_TYPE_NAME;
}
@Override
public String convertINT96(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return "timestamp-millis";
}
@Override
public String convertFLOAT(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return FLOAT_TYPE_NAME;
}
@Override
public String convertDOUBLE(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return DOUBLE_TYPE_NAME;
}
@Override
public String convertFIXED_LEN_BYTE_ARRAY(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return BINARY_TYPE_NAME;
}
@Override
public String convertBINARY(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
if (originalType == OriginalType.UTF8 || originalType == OriginalType.ENUM) {
return STRING_TYPE_NAME;
} else {
return BINARY_TYPE_NAME;
}
}
});
} else {
GroupType parquetGroupType = parquetType.asGroupType();
OriginalType originalType = parquetGroupType.getOriginalType();
if (originalType != null) {
switch (originalType) {
case LIST:
if (parquetGroupType.getFieldCount() != 1) {
throw new UnsupportedOperationException("Invalid list type " + parquetGroupType);
}
Type elementType = parquetGroupType.getType(0);
if (!elementType.isRepetition(Type.Repetition.REPEATED)) {
throw new UnsupportedOperationException("Invalid list type " + parquetGroupType);
}
return createHiveArray(elementType, parquetGroupType.getName(), supportTimestamp, doFormat);
case MAP:
if (parquetGroupType.getFieldCount() != 1 || parquetGroupType.getType(0).isPrimitive()) {
throw new UnsupportedOperationException("Invalid map type " + parquetGroupType);
}
GroupType mapKeyValType = parquetGroupType.getType(0).asGroupType();
if (!mapKeyValType.isRepetition(Type.Repetition.REPEATED)
|| !mapKeyValType.getOriginalType().equals(OriginalType.MAP_KEY_VALUE)
|| mapKeyValType.getFieldCount() != 2) {
throw new UnsupportedOperationException("Invalid map type " + parquetGroupType);
}
Type keyType = mapKeyValType.getType(0);
if (!keyType.isPrimitive()
|| !keyType.asPrimitiveType().getPrimitiveTypeName().equals(PrimitiveType.PrimitiveTypeName.BINARY)
|| !keyType.getOriginalType().equals(OriginalType.UTF8)) {
throw new UnsupportedOperationException("Map key type must be binary (UTF8): " + keyType);
}
Type valueType = mapKeyValType.getType(1);
return createHiveMap(convertField(keyType, supportTimestamp, doFormat), convertField(valueType, supportTimestamp, doFormat), doFormat);
case ENUM:
case UTF8:
return STRING_TYPE_NAME;
case MAP_KEY_VALUE:
// MAP_KEY_VALUE was supposed to be used to annotate key and value group levels in a MAP.
// However, that is always implied by the structure of MAP. Hence, PARQUET-113
// dropped the requirement for having MAP_KEY_VALUE.
default:
throw new UnsupportedOperationException("Cannot convert Parquet type " + parquetType);
}
} else {
// if no original type then it's a record
return createHiveStruct(parquetGroupType.getFields(), supportTimestamp, doFormat);
}
}
} | 3.68 |
pulsar_PulsarAdmin_builder | /**
* Get a new builder instance that can be used to configure and build a {@link PulsarAdmin} instance.
*
* @return the {@link PulsarAdminBuilder}
*
*/
static PulsarAdminBuilder builder() {
return DefaultImplementation.newAdminClientBuilder();
} | 3.68 |
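A minimal sketch of building an admin client with the builder above; the service URL is a placeholder for a local broker's admin endpoint.
import org.apache.pulsar.client.admin.PulsarAdmin;
public class PulsarAdminBuilderDemo {
    public static void main(String[] args) throws Exception {
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080") // placeholder admin URL
                .build()) {
            System.out.println(admin.clusters().getClusters());
        }
    }
}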
flink_KeyedStateFactory_createOrUpdateInternalState | /**
* Creates or updates internal state and returns a new {@link InternalKvState}.
*
* @param namespaceSerializer TypeSerializer for the state namespace.
* @param stateDesc The {@code StateDescriptor} that contains the name of the state.
* @param snapshotTransformFactory factory of state snapshot transformer.
* @param allowFutureMetadataUpdates whether allow metadata to update in the future or not.
* @param <N> The type of the namespace.
* @param <SV> The type of the stored state value.
* @param <SEV> The type of the stored state value or entry for collection types (list or map).
* @param <S> The type of the public API state.
* @param <IS> The type of internal state.
*/
@Nonnull
default <N, SV, SEV, S extends State, IS extends S> IS createOrUpdateInternalState(
@Nonnull TypeSerializer<N> namespaceSerializer,
@Nonnull StateDescriptor<S, SV> stateDesc,
@Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory,
boolean allowFutureMetadataUpdates)
throws Exception {
if (allowFutureMetadataUpdates) {
throw new UnsupportedOperationException(
this.getClass().getName() + " doesn't support allowing future metadata updates");
} else {
return createOrUpdateInternalState(
namespaceSerializer, stateDesc, snapshotTransformFactory);
}
} | 3.68 |
morf_SqlServerDialect_getColumnDefaultConstraintName | /**
* Get the name of the DEFAULT constraint for a column.
*
* @param table The table on which the column exists.
* @param column The column to get the name for.
* @return The name of the DEFAULT constraint for the column on the table.
*/
private String getColumnDefaultConstraintName(final Table table, final Column column) {
return table.getName() + "_" + column.getName() + "_DF";
} | 3.68 |
framework_GridSingleSelect_isDeselectAllowed | /**
* Gets whether it's allowed to deselect the selected row through the UI.
*
* @return <code>true</code> if deselection is allowed; otherwise
* <code>false</code>
*/
public boolean isDeselectAllowed() {
return model.isDeselectAllowed();
} | 3.68 |
flink_BinaryStringData_numBytesForFirstByte | /**
* Returns the number of bytes for a code point with the first byte as `b`.
*
* @param b The first byte of a code point
*/
static int numBytesForFirstByte(final byte b) {
if (b >= 0) {
// 1 byte, 7 bits: 0xxxxxxx
return 1;
} else if ((b >> 5) == -2 && (b & 0x1e) != 0) {
// 2 bytes, 11 bits: 110xxxxx 10xxxxxx
return 2;
} else if ((b >> 4) == -2) {
// 3 bytes, 16 bits: 1110xxxx 10xxxxxx 10xxxxxx
return 3;
} else if ((b >> 3) == -2) {
// 4 bytes, 21 bits: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
return 4;
} else {
// Skip the first byte disallowed in UTF-8
// Handling errors quietly, same semantics to java String.
return 1;
}
} | 3.68 |
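A self-contained mirror of the same byte-length decision (the original is package-private), checked against the JDK's UTF-8 encoder; the sample strings are arbitrary.
import java.nio.charset.StandardCharsets;
public class Utf8FirstByteDemo {
    // Same decision logic as BinaryStringData#numBytesForFirstByte, copied here for illustration.
    static int numBytesForFirstByte(final byte b) {
        if (b >= 0) {
            return 1; // 1 byte, 7 bits: 0xxxxxxx
        } else if ((b >> 5) == -2 && (b & 0x1e) != 0) {
            return 2; // 2 bytes: 110xxxxx 10xxxxxx
        } else if ((b >> 4) == -2) {
            return 3; // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx
        } else if ((b >> 3) == -2) {
            return 4; // 4 bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        } else {
            return 1; // invalid leading byte, handled quietly
        }
    }
    public static void main(String[] args) {
        for (String s : new String[] {"A", "é", "€", "😀"}) {
            byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
            // Expected: 1, 2, 3 and 4 bytes respectively.
            System.out.println(s + " -> " + numBytesForFirstByte(utf8[0]) + " of " + utf8.length + " bytes");
        }
    }
}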
hbase_MasterQuotaManager_setQuota | /*
* ========================================================================== Admin operations to
* manage the quota table
*/
public SetQuotaResponse setQuota(final SetQuotaRequest req)
throws IOException, InterruptedException {
checkQuotaSupport();
if (req.hasUserName()) {
userLocks.lock(req.getUserName());
try {
if (req.hasTableName()) {
setUserQuota(req.getUserName(), ProtobufUtil.toTableName(req.getTableName()), req);
} else if (req.hasNamespace()) {
setUserQuota(req.getUserName(), req.getNamespace(), req);
} else {
setUserQuota(req.getUserName(), req);
}
} finally {
userLocks.unlock(req.getUserName());
}
} else if (req.hasTableName()) {
TableName table = ProtobufUtil.toTableName(req.getTableName());
tableLocks.lock(table);
try {
setTableQuota(table, req);
} finally {
tableLocks.unlock(table);
}
} else if (req.hasNamespace()) {
namespaceLocks.lock(req.getNamespace());
try {
setNamespaceQuota(req.getNamespace(), req);
} finally {
namespaceLocks.unlock(req.getNamespace());
}
} else if (req.hasRegionServer()) {
regionServerLocks.lock(req.getRegionServer());
try {
setRegionServerQuota(req.getRegionServer(), req);
} finally {
regionServerLocks.unlock(req.getRegionServer());
}
} else {
throw new DoNotRetryIOException(new UnsupportedOperationException(
"a user, a table, a namespace or region server must be specified"));
}
return SetQuotaResponse.newBuilder().build();
} | 3.68 |
hadoop_AbstractRMAdminRequestInterceptor_getNextInterceptor | /**
* Gets the next {@link RMAdminRequestInterceptor} in the chain.
*/
@Override
public RMAdminRequestInterceptor getNextInterceptor() {
return this.nextInterceptor;
} | 3.68 |
hibernate-validator_ValidatorFactoryConfigurationHelper_determinePropertyConfiguredConstraintMappingContributors | /**
* Returns a list with {@link ConstraintMappingContributor}s configured via the
* {@link HibernateValidatorConfiguration#CONSTRAINT_MAPPING_CONTRIBUTORS} property.
*
* Also takes into account the deprecated {@link HibernateValidatorConfiguration#CONSTRAINT_MAPPING_CONTRIBUTOR}
* property.
*
* @param properties the properties used to bootstrap the factory
*
* @return a list with property-configured {@link ConstraintMappingContributor}s; May be empty but never {@code null}
*/
static List<ConstraintMappingContributor> determinePropertyConfiguredConstraintMappingContributors(
Map<String, String> properties, ClassLoader externalClassLoader) {
@SuppressWarnings("deprecation")
String deprecatedPropertyValue = properties.get( HibernateValidatorConfiguration.CONSTRAINT_MAPPING_CONTRIBUTOR );
String propertyValue = properties.get( HibernateValidatorConfiguration.CONSTRAINT_MAPPING_CONTRIBUTORS );
if ( StringHelper.isNullOrEmptyString( deprecatedPropertyValue ) && StringHelper.isNullOrEmptyString( propertyValue ) ) {
return Collections.emptyList();
}
StringBuilder assembledPropertyValue = new StringBuilder();
if ( !StringHelper.isNullOrEmptyString( deprecatedPropertyValue ) ) {
assembledPropertyValue.append( deprecatedPropertyValue );
}
if ( !StringHelper.isNullOrEmptyString( propertyValue ) ) {
if ( assembledPropertyValue.length() > 0 ) {
assembledPropertyValue.append( "," );
}
assembledPropertyValue.append( propertyValue );
}
String[] contributorNames = assembledPropertyValue.toString().split( "," );
List<ConstraintMappingContributor> contributors = newArrayList( contributorNames.length );
for ( String contributorName : contributorNames ) {
@SuppressWarnings("unchecked")
Class<? extends ConstraintMappingContributor> contributorType = (Class<? extends ConstraintMappingContributor>) run(
LoadClass.action( contributorName, externalClassLoader ) );
contributors.add( run( NewInstance.action( contributorType, "constraint mapping contributor class" ) ) );
}
return contributors;
} | 3.68 |
hbase_KeyValue_getValueArray | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getValueArray() {
return bytes;
} | 3.68 |
flink_FieldParser_resetParserState | /**
* Reset the state of the parser. Called as the very first method inside {@link
* FieldParser#resetErrorStateAndParse(byte[], int, int, byte[], Object)}, by default it just
    * resets its error state.
*/
protected void resetParserState() {
this.errorState = ParseErrorState.NONE;
} | 3.68 |
hudi_HoodieMetaSyncOperations_updateTableProperties | /**
* Update the table properties in metastore.
*
* @return true if properties updated.
*/
default boolean updateTableProperties(String tableName, Map<String, String> tableProperties) {
return false;
} | 3.68 |
druid_Lexer_putChar | /**
     * Appends a character to the internal buffer {@code buf}, doubling its capacity when full.
*/
protected final void putChar(char ch) {
if (bufPos == buf.length) {
char[] newsbuf = new char[buf.length * 2];
System.arraycopy(buf, 0, newsbuf, 0, buf.length);
buf = newsbuf;
}
buf[bufPos++] = ch;
} | 3.68 |
flink_MapValue_get | /*
* (non-Javadoc)
* @see java.util.Map#get(java.lang.Object)
*/
@Override
public V get(final Object key) {
return this.map.get(key);
} | 3.68 |
dubbo_ReferenceCountedResource_release | /**
     * Decreases the reference count by 1 and calls {@link #destroy()} if the reference count reaches 0.
*/
public final boolean release() {
long remainingCount = COUNTER_UPDATER.decrementAndGet(this);
if (remainingCount == 0) {
destroy();
return true;
} else if (remainingCount <= -1) {
logger.warn(PROTOCOL_ERROR_CLOSE_CLIENT, "", "", "This instance has been destroyed");
return false;
} else {
return false;
}
} | 3.68 |
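A hypothetical, standalone sketch of the retain/release discipline this method implies; it is not the Dubbo class itself, just the same counting pattern in miniature with invented names.

import java.util.concurrent.atomic.AtomicLong;

public class RefCountedDemo {
    static class PooledConnection {
        private final AtomicLong refCount = new AtomicLong(1); // the creator holds one reference

        PooledConnection retain() {
            refCount.incrementAndGet();
            return this;
        }

        boolean release() {
            long remaining = refCount.decrementAndGet();
            if (remaining == 0) {
                destroy(); // the last holder triggers cleanup exactly once
                return true;
            }
            if (remaining < 0) {
                System.err.println("release() called on an already destroyed resource");
            }
            return false;
        }

        private void destroy() {
            System.out.println("closing underlying resource");
        }
    }

    public static void main(String[] args) {
        PooledConnection c = new PooledConnection();
        c.retain();                      // a second holder appears
        System.out.println(c.release()); // false, one reference still outstanding
        System.out.println(c.release()); // true, destroy() ran
    }
}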
hbase_RegionServerFlushTableProcedureManager_start | /**
* Start accepting flush table requests.
*/
@Override
public void start() {
LOG.debug("Start region server flush procedure manager " + rss.getServerName().toString());
this.memberRpcs.start(rss.getServerName().toString(), member);
} | 3.68 |
hbase_QuotaObserverChore_getTimeUnit | /**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} value.
* @param conf The configuration object.
* @return The configured time unit for the chore period and initial delay or the default value.
*/
static TimeUnit getTimeUnit(Configuration conf) {
return TimeUnit
.valueOf(conf.get(QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY, QUOTA_OBSERVER_CHORE_TIMEUNIT_DEFAULT));
} | 3.68 |
flink_SkipListUtils_putValueVersion | /**
* Puts the version of value to value space.
*
* @param memorySegment memory segment for value space.
* @param offset offset of value space in memory segment.
* @param version version of value.
*/
public static void putValueVersion(MemorySegment memorySegment, int offset, int version) {
memorySegment.putInt(offset + VALUE_VERSION_OFFSET, version);
} | 3.68 |
hbase_RegionMover_readServersFromFile | /**
* @param filename The file should have 'host:port' per line
* @return List of servers from the file in format 'hostname:port'.
*/
private List<String> readServersFromFile(String filename) throws IOException {
List<String> servers = new ArrayList<>();
if (filename != null) {
try {
Files.readAllLines(Paths.get(filename)).stream().map(String::trim)
.filter(((Predicate<String>) String::isEmpty).negate()).map(String::toLowerCase)
.forEach(servers::add);
} catch (IOException e) {
LOG.error("Exception while reading servers from file,", e);
throw e;
}
}
return servers;
} | 3.68 |
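An illustrative sketch of the same trim/filter/lowercase pipeline applied to in-memory lines instead of a file; the host:port values are made up.

import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ServerListParseDemo {
    public static void main(String[] args) {
        List<String> servers = Stream.of("  RS1.example.com:16020 ", "", "rs2.example.com:16020")
                .map(String::trim)
                .filter(((Predicate<String>) String::isEmpty).negate()) // drop blank lines
                .map(String::toLowerCase)
                .collect(Collectors.toList());
        System.out.println(servers); // [rs1.example.com:16020, rs2.example.com:16020]
    }
}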
hadoop_MappableBlockLoader_fillBuffer | /**
* Reads bytes into a buffer until EOF or the buffer's limit is reached.
*/
protected int fillBuffer(FileChannel channel, ByteBuffer buf)
throws IOException {
int bytesRead = channel.read(buf);
if (bytesRead < 0) {
//EOF
return bytesRead;
}
while (buf.remaining() > 0) {
int n = channel.read(buf);
if (n < 0) {
//EOF
return bytesRead;
}
bytesRead += n;
}
return bytesRead;
} | 3.68 |
morf_RemoveTable_getTable | /**
* @return the {@link Table} to be removed.
*/
public Table getTable() {
return tableToBeRemoved;
} | 3.68 |
flink_AvroParquetWriters_forSpecificRecord | /**
* Creates a ParquetWriterFactory for an Avro specific type. The Parquet writers will use the
* schema of that specific type to build and write the columnar data.
*
* @param type The class of the type to write.
*/
public static <T extends SpecificRecordBase> ParquetWriterFactory<T> forSpecificRecord(
Class<T> type) {
final String schemaString = SpecificData.get().getSchema(type).toString();
final ParquetBuilder<T> builder =
(out) -> createAvroParquetWriter(schemaString, SpecificData.get(), out);
return new ParquetWriterFactory<>(builder);
} | 3.68 |
framework_MultiSelectionRenderer_reboundScrollArea | /**
    * If the scroll area has been offset by the pointer starting out there,
    * move it back a bit.
*/
private void reboundScrollArea(double timeDiff) {
if (!scrollAreaShouldRebound) {
return;
}
int reboundPx = (int) Math
.ceil(SCROLL_AREA_REBOUND_PX_PER_MS * timeDiff);
if (topBound < finalTopBound) {
topBound += reboundPx;
topBound = Math.min(topBound, finalTopBound);
updateScrollSpeed(pageY);
} else if (bottomBound > finalBottomBound) {
bottomBound -= reboundPx;
bottomBound = Math.max(bottomBound, finalBottomBound);
updateScrollSpeed(pageY);
}
} | 3.68 |