name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
framework_BrowserInfo_getCSSClass | /**
* Returns a string representing the browser in use, for use in CSS
* classnames. The classnames will be space separated abbreviations,
* optionally with a version appended.
*
* Abbreviations: Firefox: ff, Internet Explorer: ie, Safari: sa, Opera: op
*
* Browsers that CSS-wise behave like each other will get the same
* abbreviation (this usually depends on the rendering engine).
*
* This is quite simple at the moment, more heuristics will be added when
* needed.
*
* Examples: Internet Explorer 6: ".v-ie .v-ie6 .v-ie60", Firefox 3.0.4:
* ".v-ff .v-ff3 .v-ff30", Opera 9.60: ".v-op .v-op9 .v-op960", Opera 10.10:
* ".v-op .v-op10 .v-op1010"
*
* @return the space-separated CSS class names describing the browser
*/
public String getCSSClass() {
String prefix = "v-";
if (cssClass == null) {
String browserIdentifier = "";
String majorVersionClass = "";
String minorVersionClass = "";
String browserEngineClass = "";
if (browserDetails.isFirefox()) {
browserIdentifier = BROWSER_FIREFOX;
majorVersionClass = browserIdentifier
+ getBrowserMajorVersion();
minorVersionClass = majorVersionClass
+ browserDetails.getBrowserMinorVersion();
browserEngineClass = ENGINE_GECKO;
} else if (browserDetails.isChrome()) {
// TODO update when Chrome is more stable
browserIdentifier = BROWSER_SAFARI;
majorVersionClass = "ch";
browserEngineClass = ENGINE_WEBKIT;
} else if (browserDetails.isSafari()) {
browserIdentifier = BROWSER_SAFARI;
majorVersionClass = browserIdentifier
+ getBrowserMajorVersion();
minorVersionClass = majorVersionClass
+ browserDetails.getBrowserMinorVersion();
browserEngineClass = ENGINE_WEBKIT;
} else if (browserDetails.isPhantomJS()) {
// Safari needed for theme
browserIdentifier = BROWSER_SAFARI;
majorVersionClass = browserIdentifier
+ getBrowserMajorVersion();
minorVersionClass = majorVersionClass
+ browserDetails.getBrowserMinorVersion();
browserEngineClass = ENGINE_WEBKIT;
} else if (browserDetails.isIE()) {
browserIdentifier = BROWSER_IE;
majorVersionClass = browserIdentifier
+ getBrowserMajorVersion();
minorVersionClass = majorVersionClass
+ browserDetails.getBrowserMinorVersion();
browserEngineClass = ENGINE_TRIDENT;
} else if (browserDetails.isEdge()) {
browserIdentifier = BROWSER_EDGE;
majorVersionClass = browserIdentifier
+ getBrowserMajorVersion();
minorVersionClass = majorVersionClass
+ browserDetails.getBrowserMinorVersion();
browserEngineClass = "";
} else if (browserDetails.isOpera()) {
browserIdentifier = BROWSER_OPERA;
majorVersionClass = browserIdentifier
+ getBrowserMajorVersion();
minorVersionClass = majorVersionClass
+ browserDetails.getBrowserMinorVersion();
browserEngineClass = ENGINE_PRESTO;
}
cssClass = prefix + browserIdentifier;
if (!majorVersionClass.isEmpty()) {
cssClass = cssClass + " " + prefix + majorVersionClass;
}
if (!minorVersionClass.isEmpty()) {
cssClass = cssClass + " " + prefix + minorVersionClass;
}
if (!browserEngineClass.isEmpty()) {
cssClass = cssClass + " " + prefix + browserEngineClass;
}
String osClass = getOperatingSystemClass();
if (osClass != null) {
cssClass = cssClass + " " + osClass;
}
if (isTouchDevice()) {
cssClass = cssClass + " " + prefix + UI_TOUCH;
}
}
return cssClass;
} | 3.68 |
framework_ConnectorTracker_getStreamVariable | /**
* Checks if the indicated connector has a StreamVariable of the given name
* and returns the variable if one is found.
*
* @param connectorId the id of the connector
* @param variableName the name of the StreamVariable
* @return variable if a matching one exists, otherwise null
*/
public StreamVariable getStreamVariable(String connectorId,
String variableName) {
if (pidToNameToStreamVariable == null) {
return null;
}
Map<String, StreamVariable> map = pidToNameToStreamVariable
.get(connectorId);
if (map == null) {
return null;
}
StreamVariable streamVariable = map.get(variableName);
return streamVariable;
} | 3.68 |
zxing_SearchBookContentsActivity_handleSearchResults | // Currently there is no way to distinguish between a query which had no results and a book
// which is not searchable - both return zero results.
private void handleSearchResults(JSONObject json) {
try {
int count = json.getInt("number_of_results");
headerView.setText(getString(R.string.msg_sbc_results) + " : " + count);
if (count > 0) {
JSONArray results = json.getJSONArray("search_results");
SearchBookContentsResult.setQuery(queryTextView.getText().toString());
List<SearchBookContentsResult> items = new ArrayList<>(count);
for (int x = 0; x < count; x++) {
items.add(parseResult(results.getJSONObject(x)));
}
resultListView.setOnItemClickListener(new BrowseBookListener(SearchBookContentsActivity.this, items));
resultListView.setAdapter(new SearchBookContentsAdapter(SearchBookContentsActivity.this, items));
} else {
String searchable = json.optString("searchable");
if ("false".equals(searchable)) {
headerView.setText(R.string.msg_sbc_book_not_searchable);
}
resultListView.setAdapter(null);
}
} catch (JSONException e) {
Log.w(TAG, "Bad JSON from book search", e);
resultListView.setAdapter(null);
headerView.setText(R.string.msg_sbc_failed);
}
} | 3.68 |
hbase_MultiByteBuff_capacity | /** Returns the total capacity of this MultiByteBuffer. */
@Override
public int capacity() {
checkRefCount();
int c = 0;
for (ByteBuffer item : this.items) {
c += item.capacity();
}
return c;
} | 3.68 |
framework_VAccordion_getStackItem | /**
* For internal use only. May be removed or replaced in the future.
*
* @param index
* the index of the stack item to get
* @return the stack item
*/
public StackItem getStackItem(int index) {
return (StackItem) getWidget(index);
} | 3.68 |
framework_FocusableGrid_focus | /**
* Focus the panel.
*/
@Override
public void focus() {
setFocus(true);
} | 3.68 |
framework_DragAndDropService_handleDropRequest | /**
* Handles a drop request from the VDragAndDropManager.
*
* @param dropTarget the target of the drop operation
* @param variables the drop variables sent from the client
*/
private void handleDropRequest(DropTarget dropTarget,
Map<String, Object> variables) {
DropHandler dropHandler = dropTarget.getDropHandler();
if (dropHandler == null) {
// No dropHandler returned so no drop can be performed.
getLogger().log(Level.FINE,
"DropTarget.getDropHandler() returned null for owner: {0}",
dropTarget);
return;
}
/*
* Construct the Transferable and the DragDropDetails for the drop
* operation based on the info passed from the client widgets (drag
* source for Transferable, drop target for DragDropDetails).
*/
Transferable transferable = constructTransferable(variables);
TargetDetails dropData = constructDragDropDetails(dropTarget,
variables);
DragAndDropEvent dropEvent = new DragAndDropEvent(transferable,
dropData);
if (dropHandler.getAcceptCriterion().accept(dropEvent)) {
dropHandler.drop(dropEvent);
}
} | 3.68 |
pulsar_RangeCache_evictLeastAccessedEntries | /**
*
* @param minSize the minimum total size of entries to evict
* @return a pair containing the number of entries evicted and their total size
*/
public Pair<Integer, Long> evictLeastAccessedEntries(long minSize) {
checkArgument(minSize > 0);
long removedSize = 0;
int removedEntries = 0;
while (removedSize < minSize) {
Map.Entry<Key, Value> entry = entries.pollFirstEntry();
if (entry == null) {
break;
}
Value value = entry.getValue();
++removedEntries;
removedSize += weighter.getSize(value);
value.release();
}
size.addAndGet(-removedSize);
return Pair.of(removedEntries, removedSize);
} | 3.68 |
hudi_FileSystemViewManager_createInMemoryFileSystemView | /**
* Create an in-memory file System view for a table.
*
*/
private static HoodieTableFileSystemView createInMemoryFileSystemView(HoodieMetadataConfig metadataConfig, FileSystemViewStorageConfig viewConf,
HoodieTableMetaClient metaClient, SerializableFunctionUnchecked<HoodieTableMetaClient, HoodieTableMetadata> metadataCreator) {
LOG.info("Creating InMemory based view for basePath " + metaClient.getBasePathV2());
HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants();
if (metaClient.getTableConfig().isMetadataTableAvailable()) {
ValidationUtils.checkArgument(metadataCreator != null, "Metadata supplier is null. Cannot instantiate metadata file system view");
return new HoodieMetadataFileSystemView(metaClient, timeline, metadataCreator.apply(metaClient));
}
if (metaClient.getMetaserverConfig().isMetaserverEnabled()) {
return (HoodieTableFileSystemView) ReflectionUtils.loadClass(HOODIE_METASERVER_FILE_SYSTEM_VIEW_CLASS,
new Class<?>[] {HoodieTableMetaClient.class, HoodieTimeline.class, HoodieMetaserverConfig.class},
metaClient, timeline, metaClient.getMetaserverConfig());
}
return new HoodieTableFileSystemView(metaClient, timeline, viewConf.isIncrementalTimelineSyncEnabled());
} | 3.68 |
flink_MapView_clear | /** Removes all entries of this map. */
@Override
public void clear() {
map.clear();
} | 3.68 |
dubbo_ClassUtils_isAssignableFrom | /**
* the semantics is same as {@link Class#isAssignableFrom(Class)}
*
* @param superType the super type
* @param targetType the target type
* @return see {@link Class#isAssignableFrom(Class)}
* @since 2.7.6
*/
public static boolean isAssignableFrom(Class<?> superType, Class<?> targetType) {
// any argument is null
if (superType == null || targetType == null) {
return false;
}
// equals
if (Objects.equals(superType, targetType)) {
return true;
}
// isAssignableFrom
return superType.isAssignableFrom(targetType);
} | 3.68 |
hibernate-validator_SizeValidatorForMap_isValid | /**
* Checks the number of entries in a map.
*
* @param map The map to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the map is {@code null} or the number of entries in {@code map}
* is between the specified {@code min} and {@code max} values (inclusive),
* {@code false} otherwise.
*/
@Override
public boolean isValid(Map map, ConstraintValidatorContext constraintValidatorContext) {
if ( map == null ) {
return true;
}
int size = map.size();
return size >= min && size <= max;
} | 3.68 |
dubbo_MessageFormatter_deeplyAppendParameter | // special treatment of array values was suggested by 'lizongbo'
private static void deeplyAppendParameter(StringBuffer sbuf, Object o, Map<Object[], Void> seenMap) {
if (o == null) {
sbuf.append("null");
return;
}
if (!o.getClass().isArray()) {
safeObjectAppend(sbuf, o);
} else {
// check for primitive array types because they
// unfortunately cannot be cast to Object[]
if (o instanceof boolean[]) {
booleanArrayAppend(sbuf, (boolean[]) o);
} else if (o instanceof byte[]) {
byteArrayAppend(sbuf, (byte[]) o);
} else if (o instanceof char[]) {
charArrayAppend(sbuf, (char[]) o);
} else if (o instanceof short[]) {
shortArrayAppend(sbuf, (short[]) o);
} else if (o instanceof int[]) {
intArrayAppend(sbuf, (int[]) o);
} else if (o instanceof long[]) {
longArrayAppend(sbuf, (long[]) o);
} else if (o instanceof float[]) {
floatArrayAppend(sbuf, (float[]) o);
} else if (o instanceof double[]) {
doubleArrayAppend(sbuf, (double[]) o);
} else {
objectArrayAppend(sbuf, (Object[]) o, seenMap);
}
}
} | 3.68 |
hbase_HFileCorruptionChecker_getMissing | /**
* @return the set of paths that were missing. Likely due to deletion/moves from compaction or
* flushes.
*/
public Collection<Path> getMissing() {
return new HashSet<>(missing);
} | 3.68 |
hibernate-validator_HibernateConstraintViolationBuilder_enableExpressionLanguage | /**
* Enable Expression Language with the default Expression Language feature level for the constraint violation
* created by this builder if the chosen {@code MessageInterpolator} supports it.
* <p>
* If you enable this, you need to make sure your message template does not contain any unescaped user input (such as
* the validated value): use {@code addExpressionVariable()} to inject properly escaped variables into the template.
*
* @since 6.2
*/
@Incubating
default HibernateConstraintViolationBuilder enableExpressionLanguage() {
return enableExpressionLanguage( ExpressionLanguageFeatureLevel.DEFAULT );
} | 3.68 |
Activiti_BaseEntityEventListener_onCreate | /**
* Called when an entity create event is received.
*/
protected void onCreate(ActivitiEvent event) {
// Default implementation is a NO-OP
} | 3.68 |
framework_AbsoluteLayout_getRightValue | /**
* Gets the 'right' attributes value in current units.
*
* @return The value of the 'right' attribute, null if not set
* @see #getRightUnits()
*/
public Float getRightValue() {
return rightValue;
} | 3.68 |
hbase_PermissionStorage_parsePermissions | /**
* Parse and filter permission based on the specified column family, column qualifier and user
* name.
*/
private static ListMultimap<String, UserPermission> parsePermissions(byte[] entryName,
Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) {
ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
if (result != null && result.size() > 0) {
for (Cell kv : result.rawCells()) {
Pair<String, Permission> permissionsOfUserOnTable =
parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user);
if (permissionsOfUserOnTable != null) {
String username = permissionsOfUserOnTable.getFirst();
Permission permission = permissionsOfUserOnTable.getSecond();
perms.put(username, new UserPermission(username, permission));
}
}
}
return perms;
} | 3.68 |
hbase_JVMClusterUtil_createRegionServerThread | /**
* Creates a {@link RegionServerThread}. Call 'start' on the returned thread to make it run.
* @param c Configuration to use.
* @param hrsc Class to create.
* @param index Used for distinguishing the object returned.
* @return Region server added.
*/
public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c,
final Class<? extends HRegionServer> hrsc, final int index) throws IOException {
HRegionServer server;
try {
Constructor<? extends HRegionServer> ctor = hrsc.getConstructor(Configuration.class);
ctor.setAccessible(true);
server = ctor.newInstance(c);
} catch (InvocationTargetException ite) {
Throwable target = ite.getTargetException();
throw new RuntimeException("Failed construction of RegionServer: " + hrsc.toString()
+ ((target.getCause() != null) ? target.getCause().getMessage() : ""), target);
} catch (Exception e) {
throw new IOException(e);
}
return new JVMClusterUtil.RegionServerThread(server, index);
} | 3.68 |
hudi_MarkerCreationDispatchingRunnable_run | /**
* Dispatches the marker creation requests that can be process to a worker thread of batch
* processing the requests.
*
* For each marker directory, goes through the following steps:
* (1) find the next available file index for writing. If no file index is available,
* skip the processing of this marker directory;
* (2) fetch the pending marker creation requests for this marker directory. If there is
* no request, skip this marker directory;
* (3) put the marker directory, marker dir state, list of requests futures, and the file index
* to a {@code MarkerDirRequestContext} instance and add the instance to the request context list.
*
* If the request context list is not empty, spins up a worker thread, {@code MarkerCreationBatchingRunnable},
* and pass all the request context to the thread for batch processing. The thread is responsible
* for responding to the request futures directly.
*/
@Override
public void run() {
List<BatchedMarkerCreationContext> requestContextList = new ArrayList<>();
// Only fetch pending marker creation requests that can be processed,
// i.e., that markers can be written to a underlying file
// markerDirStateMap is used in other thread, need to ensure thread safety
for (Map.Entry<String, MarkerDirState> entry : markerDirStateMap.entrySet()) {
String markerDir = entry.getKey();
MarkerDirState markerDirState = entry.getValue();
Option<Integer> fileIndex = markerDirState.getNextFileIndexToUse();
if (!fileIndex.isPresent()) {
LOG.debug("All marker files are busy, skip batch processing of create marker requests in " + markerDir);
continue;
}
List<MarkerCreationFuture> futures = markerDirState.fetchPendingMarkerCreationRequests();
if (futures.isEmpty()) {
markerDirState.markFileAsAvailable(fileIndex.get());
continue;
}
requestContextList.add(
new BatchedMarkerCreationContext(markerDir, markerDirState, futures, fileIndex.get()));
}
if (requestContextList.size() > 0) {
executorService.execute(
new BatchedMarkerCreationRunnable(requestContextList));
}
} | 3.68 |
morf_DataSourceAdapter_getParentLogger | /**
* @see javax.sql.CommonDataSource#getParentLogger()
*/
@Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
throw new UnsupportedOperationException("Log writer not supported by ConnectionDetails");
} | 3.68 |
hadoop_RoleModel_directory | /**
* Given a directory path, return the S3 resource to it.
* @param path a path
* @return a resource for a statement.
*/
public static String[] directory(Path path) {
String host = path.toUri().getHost();
String key = pathToKey(path);
if (!key.isEmpty()) {
return new String[] {
resource(host, key + "/", true),
resource(host, key, false),
resource(host, key + "/", false),
};
} else {
return new String[]{
resource(host, key, true),
};
}
} | 3.68 |
pulsar_PulsarClientImpl_newTransaction | // This method should be exposed in the PulsarClient interface. Only expose it when all the transaction features
// are completed.
// @Override
public TransactionBuilder newTransaction() {
return new TransactionBuilderImpl(this, tcClient);
} | 3.68 |
hbase_RackManager_getRack | /**
* Same as {@link #getRack(ServerName)} except that a list is passed
* @param servers list of servers we're requesting racks information for
* @return list of racks for the given list of servers
*/
public List<String> getRack(List<ServerName> servers) {
// just a note - switchMapping caches results (at least the implementation should unless the
// resolution is really a lightweight process)
List<String> serversAsString = new ArrayList<>(servers.size());
for (ServerName server : servers) {
serversAsString.add(server.getHostname());
}
List<String> racks = switchMapping.resolve(serversAsString);
return racks;
} | 3.68 |
flink_GenericDataSinkBase_addInputs | /**
* Adds to the input the union of the given operators.
*
* @param inputs The operator(s) to be unioned with the input.
* @deprecated This method will be removed in future versions. Use the {@link
* org.apache.flink.api.common.operators.Union} operator instead.
*/
@SuppressWarnings("unchecked")
@Deprecated
public void addInputs(List<? extends Operator<IN>> inputs) {
checkNotNull(inputs, "The inputs may not be null.");
this.input =
createUnionCascade(
this.input, (Operator<IN>[]) inputs.toArray(new Operator[inputs.size()]));
} | 3.68 |
pulsar_JettyRequestLogFactory_createRequestLogger | /**
* Build a new Jetty request logger using the format defined in this class.
* @return a request logger
*/
public static CustomRequestLog createRequestLogger() {
return new CustomRequestLog(new Slf4jRequestLogWriter(), LOG_FORMAT);
} | 3.68 |
hbase_TableSchemaModel___getReadOnly | /** Returns true if READONLY attribute exists and is true */
public boolean __getReadOnly() {
Object o = attrs.get(READONLY);
return o != null ? Boolean.parseBoolean(o.toString()) : TableDescriptorBuilder.DEFAULT_READONLY;
} | 3.68 |
pulsar_AbstractCmdConsume_updateConfig | /**
* Set client configuration.
*
*/
public void updateConfig(ClientBuilder clientBuilder, Authentication authentication, String serviceURL) {
this.clientBuilder = clientBuilder;
this.authentication = authentication;
this.serviceURL = serviceURL;
} | 3.68 |
hbase_WALPrettyPrinter_setRegionFilter | /**
* Sets the region by which output will be filtered. When not null, it serves as a filter; only log
* entries from this region will be printed.
*/
public void setRegionFilter(String region) {
this.region = region;
} | 3.68 |
flink_SkipListUtils_putKeyData | /**
* Puts the key data into key space.
*
* @param segment memory segment for key space.
* @param offset offset of key space in memory segment.
* @param keySegment memory segment for key data.
* @param keyOffset offset of key data in memory segment.
* @param keyLen length of key data.
* @param level level of the key.
*/
public static void putKeyData(
MemorySegment segment,
int offset,
MemorySegment keySegment,
int keyOffset,
int keyLen,
int level) {
keySegment.copyTo(keyOffset, segment, offset + getKeyDataOffset(level), keyLen);
} | 3.68 |
morf_Upgrade_findPath | /**
* Find an upgrade path.
*
* @param targetSchema Target schema to upgrade to.
* @param upgradeSteps All available upgrade steps.
* @param exceptionRegexes Regular expression for table exclusions.
* @param exclusiveExecutionSteps names of the upgrade step classes which should
* be executed in an exclusive way
* @param dataSource The data source to use to find the upgrade path
* @return The upgrade path available
*/
public UpgradePath findPath(Schema targetSchema, Collection<Class<? extends UpgradeStep>> upgradeSteps, Collection<String> exceptionRegexes, Set<String> exclusiveExecutionSteps, DataSource dataSource) {
final List<String> upgradeStatements = new ArrayList<>();
ResultSetProcessor<Long> upgradeAuditRowProcessor = resultSet -> {resultSet.next(); return resultSet.getLong(1);};
long upgradeAuditCount = getUpgradeAuditRowCount(upgradeAuditRowProcessor); //fetch a number of upgrade steps applied previously to do optimistic locking check later
//Return an upgradePath with the current upgrade status if one is in progress
UpgradeStatus status = upgradeStatusTableService.getStatus(Optional.of(dataSource));
if (status != NONE) {
return new UpgradePath(status);
}
// -- Validate the target schema...
//
new SchemaValidator().validate(targetSchema);
// Get access to the schema we are starting from
log.info("Reading current schema");
Schema sourceSchema = UpgradeHelper.copySourceSchema(connectionResources, dataSource, exceptionRegexes);
SqlDialect dialect = connectionResources.sqlDialect();
// -- Get the current UUIDs and deployed views...
log.info("Examining current views"); //
ExistingViewStateLoader existingViewState = new ExistingViewStateLoader(dialect, new ExistingViewHashLoader(dataSource, dialect), viewDeploymentValidator);
Result viewChangeInfo = existingViewState.viewChanges(sourceSchema, targetSchema);
ViewChanges viewChanges = new ViewChanges(targetSchema.views(), viewChangeInfo.getViewsToDrop(), viewChangeInfo.getViewsToDeploy());
// -- Determine if an upgrade path exists between the two schemas...
//
log.info("Searching for upgrade path from [" + sourceSchema + "] to [" + targetSchema + "]");
ExistingTableStateLoader existingTableState = new ExistingTableStateLoader(dataSource, dialect);
UpgradePathFinder pathFinder = new UpgradePathFinder(upgradeSteps, existingTableState.loadAppliedStepUUIDs());
pathFinder.findDiscrepancies(getUpgradeAuditRecords());
SchemaChangeSequence schemaChangeSequence;
status = upgradeStatusTableService.getStatus(Optional.of(dataSource));
if (status != NONE) {
return new UpgradePath(status);
}
try {
schemaChangeSequence = pathFinder.determinePath(sourceSchema, targetSchema, exceptionRegexes);
} catch (NoUpgradePathExistsException e) {
log.debug("No upgrade path found - checking upgrade status", e);
status = upgradeStatusTableService.getStatus(Optional.of(dataSource));
if (status != NONE) {
log.info("Schema differences found, but upgrade in progress - no action required until upgrade is complete");
return new UpgradePath(status);
}
else if (upgradeAuditCount != getUpgradeAuditRowCount(upgradeAuditRowProcessor)) {
//In the meantime another node managed to finish the upgrade steps and flip the status back to NONE. Assuming the upgrade was in progress on another node.
log.info("Schema differences found, but upgrade was progressed on another node - no action required");
return new UpgradePath(UpgradeStatus.IN_PROGRESS);
}
else {
throw e;
}
}
// -- Only run the upgrader if there are any steps to apply...
//
if (!schemaChangeSequence.getUpgradeSteps().isEmpty()) {
// Run the upgrader over all the ElementarySchemaChanges in the upgrade steps
InlineTableUpgrader upgrader = new InlineTableUpgrader(sourceSchema, dialect, new SqlStatementWriter() {
@Override
public void writeSql(Collection<String> sql) {
upgradeStatements.addAll(sql);
}
}, SqlDialect.IdTable.withPrefix(dialect, "temp_id_"));
upgrader.preUpgrade();
schemaChangeSequence.applyTo(upgrader);
upgrader.postUpgrade();
}
// -- Upgrade path...
//
List<UpgradeStep> upgradesToApply = new ArrayList<>(schemaChangeSequence.getUpgradeSteps());
// Placeholder upgrade step if no other upgrades - or we drop & recreate everything
if (upgradesToApply.isEmpty() && !viewChanges.isEmpty()) {
upgradesToApply.add(new UpgradeStep() {
@Override public String getJiraId() { return "\u2014"; }
@Override public String getDescription() { return "Update database views"; }
@Override public void execute(SchemaEditor schema, DataEditor data) { /* No changes */ }
});
} else if (!upgradesToApply.isEmpty()) {
viewChanges = viewChanges.droppingAlso(sourceSchema.views()).deployingAlso(targetSchema.views());
}
// Prepare GraphBasedUpgradeBuilder, not supported in the static context (graphBasedUpgradeBuilderFactory = null).
// The builder should be created even when there are no steps to run, for the view rebuild case.
GraphBasedUpgradeBuilder graphBasedUpgradeBuilder = null;
if (graphBasedUpgradeBuilderFactory != null) {
graphBasedUpgradeBuilder = graphBasedUpgradeBuilderFactory.create(
sourceSchema,
targetSchema,
connectionResources,
exclusiveExecutionSteps,
schemaChangeSequence,
viewChanges);
}
// Build the actual upgrade path
return buildUpgradePath(connectionResources, sourceSchema, targetSchema, upgradeStatements, viewChanges, upgradesToApply, graphBasedUpgradeBuilder, upgradeAuditCount);
} | 3.68 |
morf_UpgradeTableResolution_isPortableSqlStatementUsed | /**
* @param upgradeStepName name of the class of the upgrade step to be checked
* @return true if given upgrade step is using {@link PortableSqlStatement} or null if this upgrade
* step hasn't been processed
*/
public Boolean isPortableSqlStatementUsed(String upgradeStepName) {
return resolvedTablesMap.get(upgradeStepName) == null ? null : resolvedTablesMap.get(upgradeStepName).isPortableSqlStatementUsed();
} | 3.68 |
morf_SelectStatementBuilder_union | /**
* Perform an UNION set operation with another {@code selectStatement},
* eliminating any duplicate rows.
*
* <p>It is possible to have more than one union statement by chaining union calls:</p>
* <blockquote><pre>
* SelectStatement stmtA = select(...).from(...);
* SelectStatement stmtB = select(...).from(...);
* SelectStatement stmtC = select(...).from(...).union(stmtA).union(stmtB).orderBy(...);
* </pre></blockquote>
*
* <p>If an union operation is performed then all
* participating select statements require the same selected column list, i.e.
* same naming and ordering. In addition, only the leftmost select statement
* should have an order-by statement (see example above).</p>
*
* @param selectStatement the select statement to be united with the current select statement;
* @return this, for method chaining.
*/
public SelectStatementBuilder union(SelectStatement selectStatement) {
setOperators.add(new UnionSetOperator(UnionStrategy.DISTINCT, this.build(), selectStatement));
return this;
} | 3.68 |
hbase_TableState_isDisabled | /** Returns True if table is disabled. */
public boolean isDisabled() {
return isInStates(State.DISABLED);
} | 3.68 |
flink_ArrowWriter_getFieldWriters | /** Gets the field writers. */
public ArrowFieldWriter<IN>[] getFieldWriters() {
return fieldWriters;
} | 3.68 |
dubbo_ReferenceBuilder_services | /**
* @param service one service name
* @param otherServices other service names
* @return {@link ReferenceBuilder}
* @since 2.7.8
*/
public ReferenceBuilder<T> services(String service, String... otherServices) {
this.services = toCommaDelimitedString(service, otherServices);
return getThis();
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertWithIdAndVersion | /**
* Tests that an insert from a select works when no defaults are supplied for the id or version columns.
*/
@Test
public void testInsertWithIdAndVersion() {
SelectStatement sourceStmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(OTHER_TABLE));
InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE))
.fields(new FieldReference(STRING_FIELD))
.from(sourceStmt);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertSQLEquals("Insert from a select with no default for id", expectedInsertWithIdAndVersion(), sql);
} | 3.68 |
morf_RenameIndex_accept | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor)
*/
@Override
public void accept(SchemaChangeVisitor visitor) {
visitor.visit(this);
} | 3.68 |
dubbo_ClassUtils_getDeclaredMethodNames | /**
* Gets the method name array.
*
* @return method name array.
*/
public static String[] getDeclaredMethodNames(Class<?> tClass) {
if (tClass == Object.class) {
return OBJECT_METHODS;
}
Method[] methods =
Arrays.stream(tClass.getMethods()).collect(Collectors.toList()).toArray(new Method[] {});
List<String> dmns = new ArrayList<>(); // method names.
boolean hasMethod = hasMethods(methods);
if (hasMethod) {
for (Method m : methods) {
// ignore Object's method.
if (m.getDeclaringClass() == Object.class) {
continue;
}
String mn = m.getName();
if (m.getDeclaringClass() == tClass) {
dmns.add(mn);
}
}
}
dmns.sort(Comparator.naturalOrder());
return dmns.toArray(new String[0]);
} | 3.68 |
hbase_SnapshotScannerHDFSAclHelper_removeNamespaceDefaultAcl | /**
* Remove default acl from namespace archive dir when delete namespace
* @param namespace the namespace
* @param removeUsers the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
try {
long start = EnvironmentEdgeManager.currentTime();
Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
return false;
}
} | 3.68 |
AreaShop_GeneralRegion_update | /**
* Broadcast an event to indicate that region settings have been changed.
* This will update region flags, signs, etc.
*/
public void update() {
Bukkit.getServer().getPluginManager().callEvent(new UpdateRegionEvent(this));
} | 3.68 |
hadoop_FsAction_not | /**
* NOT operation.
* @return the complement of this action.
*/
public FsAction not() {
return vals[7 - ordinal()];
} | 3.68 |
morf_AbstractSqlDialectTest_expectedRightTrim | /**
* @return The expected SQL for a Right Trim
*/
protected String expectedRightTrim() {
return "SELECT RTRIM(field1) FROM " + tableName("schedule");
} | 3.68 |
graphhopper_AbstractSRTMElevationProvider_toShort | // we need big endianness to read the SRTM files
final short toShort(byte[] b, int offset) {
return (short) ((b[offset] & 0xFF) << 8 | (b[offset + 1] & 0xFF));
} | 3.68 |
framework_GridMultiSelect_setUserSelectionAllowed | /**
* Sets whether the user is allowed to change the selection.
* <p>
* The check is done only for the client side actions. It doesn't affect
* selection requests sent from the server side.
*
* @param allowed
* <code>true</code> if the user is allowed to change the
* selection, <code>false</code> otherwise
*/
public void setUserSelectionAllowed(boolean allowed) {
model.setUserSelectionAllowed(allowed);
} | 3.68 |
framework_GridLayout_setColumnExpandRatio | /**
* Sets the expand ratio of given column.
*
* <p>
* The expand ratio defines how excess space is distributed among columns.
* Excess space means space that is left over from components that are not
* sized relatively. By default, the excess space is distributed evenly.
* </p>
*
* <p>
* Note, that width of this GridLayout needs to be defined (fixed or
* relative, as opposed to undefined height) for this method to have any
* effect.
* <p>
* Note that checking for relative width for the child components is done on
* the server so you cannot set a child component to have undefined width on
* the server and set it to <code>100%</code> in CSS. You must set it to
* <code>100%</code> on the server.
*
* @see #setWidth(float, Unit)
*
* @param columnIndex
* The column index, starting from 0 for the leftmost column.
* @param ratio
* the expand ratio
*/
public void setColumnExpandRatio(int columnIndex, float ratio) {
columnExpandRatio.put(columnIndex, ratio);
getState().explicitColRatios.add(columnIndex);
markAsDirty();
} | 3.68 |
flink_TypeStrategies_matchFamily | /** Type strategy that returns the given argument if it is of the same logical type family. */
public static TypeStrategy matchFamily(int argumentPos, LogicalTypeFamily family) {
return new MatchFamilyTypeStrategy(argumentPos, family);
} | 3.68 |
hbase_Result_isStale | /**
* Whether or not the results are coming from possibly stale data. Stale results might be returned
* if {@link Consistency} is not STRONG for the query.
* @return Whether or not the results are coming from possibly stale data.
*/
public boolean isStale() {
return stale;
} | 3.68 |
hbase_RegionInfo_isLast | /** Returns True if this is last Region in Table */
default boolean isLast() {
return Bytes.equals(getEndKey(), HConstants.EMPTY_END_ROW);
} | 3.68 |
flink_InputProperty_hashDistribution | /**
* The input will read the records whose keys hash to a particular hash value.
*
* @param keys hash keys
*/
public static HashDistribution hashDistribution(int[] keys) {
return new HashDistribution(keys);
} | 3.68 |
hadoop_AbstractS3ACommitter_abortJobInternal | /**
* The internal job abort operation; can be overridden in tests.
* This must clean up operations; it is called when a commit fails, as
* well as in an {@link #abortJob(JobContext, JobStatus.State)} call.
* The base implementation calls {@link #cleanup(CommitContext, boolean)}
* so cleans up the filesystems and destroys the thread pool.
* Subclasses must always invoke this superclass method after their
* own operations.
* Creates and closes its own commit context.
*
* @param commitContext commit context
* @param suppressExceptions should exceptions be suppressed?
* @throws IOException any IO problem raised when suppressExceptions is false.
*/
protected void abortJobInternal(CommitContext commitContext,
boolean suppressExceptions)
throws IOException {
cleanup(commitContext, suppressExceptions);
} | 3.68 |
hadoop_CMgrUpdateContainersEvent_getContainersToUpdate | /**
* Get containers to update.
* @return List of containers to update.
*/
public List<Container> getContainersToUpdate() {
return this.containersToUpdate;
} | 3.68 |
morf_UpdateStatement_where | /**
* Specifies the where criteria.
*
* <blockquote><pre>
* update([table])
* .set([fields])
* .where([criteria]);</pre></blockquote>
*
* @param criterion the criteria to filter the results by
* @return a statement with the changes applied.
*/
public UpdateStatement where(Criterion criterion) {
if (AliasedField.immutableDslEnabled()) {
return shallowCopy().where(criterion).build();
} else {
if (criterion == null)
throw new IllegalArgumentException("Criterion was null in where clause");
whereCriterion = criterion;
return this;
}
} | 3.68 |
hbase_Query_getColumnFamilyTimeRange | /** Returns a map of column families to time ranges */
public Map<byte[], TimeRange> getColumnFamilyTimeRange() {
return this.colFamTimeRangeMap;
} | 3.68 |
hmily_HmilyRepositoryEventPublisher_publishEvent | /**
* Publish event.
*
* @param hmilyParticipant the hmily participant
* @param type the type
*/
public void publishEvent(final HmilyParticipant hmilyParticipant, final int type) {
HmilyRepositoryEvent event = new HmilyRepositoryEvent();
event.setType(type);
event.setTransId(hmilyParticipant.getTransId());
event.setHmilyParticipant(hmilyParticipant);
push(event);
} | 3.68 |
hmily_MetricsReporter_registerCounter | /**
* Register counter.
*
* @param name name
* @param document document for counter
*/
public static void registerCounter(final String name, final String document) {
registerCounter(name, null, document);
} | 3.68 |
dubbo_TriDecoder_processBody | /**
* Processes the GRPC message body, which depending on frame header flags may be compressed.
*/
private void processBody() {
// There is no reliable way to get the uncompressed size per message when it's compressed,
// because the uncompressed bytes are provided through an InputStream whose total size is
// unknown until all bytes are read, and we don't know when it happens.
byte[] stream = compressedFlag ? getCompressedBody() : getUncompressedBody();
listener.onRawMessage(stream);
// Done with this frame, begin processing the next header.
state = GrpcDecodeState.HEADER;
requiredLength = HEADER_LENGTH;
} | 3.68 |
flink_DeltaIterationBase_isSolutionSetUnManaged | /**
* Gets whether the solution set is in managed or unmanaged memory.
*
* @return True, if the solution set is in unmanaged memory (object heap), false if in managed
* memory.
* @see #setSolutionSetUnManaged(boolean)
*/
public boolean isSolutionSetUnManaged() {
return solutionSetUnManaged;
} | 3.68 |
framework_VMenuBar_isNavigationSelectKey | /**
* Checks whether key code selects a menu item. By default it is the Enter
* and Space keys but by overriding this you can change the keys to whatever
* you want.
*
* @since 7.2
* @param keycode
* @return true if key selects menu item
*/
protected boolean isNavigationSelectKey(int keycode) {
return keycode == getNavigationSelectKey()
|| keycode == KeyCodes.KEY_SPACE;
} | 3.68 |
hadoop_YarnVersionInfo_getBranch | /**
* Get the branch on which this originated.
* @return The branch name, e.g. "trunk" or "branches/branch-0.20"
*/
public static String getBranch() {
return YARN_VERSION_INFO._getBranch();
} | 3.68 |
flink_DuplicatingCheckpointOutputStream_closeAndGetPrimaryHandle | /** Returns the state handle from the {@link #primaryOutputStream}. */
public StreamStateHandle closeAndGetPrimaryHandle() throws IOException {
flushInternalBuffer();
return primaryOutputStream.closeAndGetHandle();
} | 3.68 |
querydsl_ExpressionUtils_predicateTemplate | /**
* Create a new Template expression
*
* @param template template
* @param args template parameters
* @return template expression
*/
public static PredicateTemplate predicateTemplate(Template template, List<?> args) {
return new PredicateTemplate(template, args);
} | 3.68 |
zilla_ManyToOneRingBuffer_size | /**
* {@inheritDoc}
*/
public int size()
{
long headBefore;
long tail;
long headAfter = buffer.getLongVolatile(headPositionIndex);
do
{
headBefore = headAfter;
tail = buffer.getLongVolatile(tailPositionIndex);
headAfter = buffer.getLongVolatile(headPositionIndex);
}
while (headAfter != headBefore);
return (int)(tail - headAfter);
} | 3.68 |
hbase_Procedure_updateMetricsOnSubmit | /**
* This function will be called when the procedure is submitted for execution. Override this
* method to update the metrics at the beginning of the procedure. The default implementation
* updates submitted counter if {@link #getProcedureMetrics(Object)} returns non-null
* {@link ProcedureMetrics}.
*/
protected void updateMetricsOnSubmit(TEnvironment env) {
ProcedureMetrics metrics = getProcedureMetrics(env);
if (metrics == null) {
return;
}
Counter submittedCounter = metrics.getSubmittedCounter();
if (submittedCounter != null) {
submittedCounter.increment();
}
} | 3.68 |
dubbo_AbstractConfig_refresh | /**
* Dubbo config property override
*/
public void refresh() {
if (needRefresh) {
try {
// check and init before do refresh
preProcessRefresh();
refreshWithPrefixes(getPrefixes(), getConfigMode());
} catch (Exception e) {
logger.error(
COMMON_FAILED_OVERRIDE_FIELD,
"",
"",
"Failed to override field value of config bean: " + this,
e);
throw new IllegalStateException("Failed to override field value of config bean: " + this, e);
}
postProcessRefresh();
}
refreshed.set(true);
} | 3.68 |
flink_SimpleVersionedSerialization_writeVersionAndSerialize | /**
* Serializes the version and datum into a byte array. The first four bytes will be occupied by
* the version (as returned by {@link SimpleVersionedSerializer#getVersion()}), followed by four
* bytes holding the length of the serialized datum, both written in <i>big-endian</i> encoding.
* The remaining bytes will be the serialized datum, as produced by
* {@link SimpleVersionedSerializer#serialize(Object)}. The resulting array will hence be eight
* bytes larger than the serialized datum.
*
* <p>Data serialized via this method can be deserialized via {@link
* #readVersionAndDeSerialize(SimpleVersionedSerializer, byte[])}.
*
* @param serializer The serializer to serialize the datum with.
* @param datum The datum to serialize.
* @return A byte array containing the serialized version and serialized datum.
* @throws IOException Exceptions from the {@link SimpleVersionedSerializer#serialize(Object)}
* method are forwarded.
*/
public static <T> byte[] writeVersionAndSerialize(
SimpleVersionedSerializer<T> serializer, T datum) throws IOException {
checkNotNull(serializer, "serializer");
checkNotNull(datum, "datum");
final byte[] data = serializer.serialize(datum);
final byte[] versionAndData = new byte[data.length + 8];
final int version = serializer.getVersion();
versionAndData[0] = (byte) (version >> 24);
versionAndData[1] = (byte) (version >> 16);
versionAndData[2] = (byte) (version >> 8);
versionAndData[3] = (byte) version;
final int length = data.length;
versionAndData[4] = (byte) (length >> 24);
versionAndData[5] = (byte) (length >> 16);
versionAndData[6] = (byte) (length >> 8);
versionAndData[7] = (byte) length;
// move the data to the array
System.arraycopy(data, 0, versionAndData, 8, data.length);
return versionAndData;
} | 3.68 |
dubbo_HealthStatusManager_enterTerminalState | /**
* enterTerminalState causes the health status manager to mark all services as not serving, and
* prevents future updates to services. This method is meant to be called prior to server
* shutdown as a way to indicate that clients should redirect their traffic elsewhere.
*/
public void enterTerminalState() {
healthService.enterTerminalState();
} | 3.68 |
hbase_HRegion_getFilesystem | /** Returns {@link FileSystem} being used by this region */
public FileSystem getFilesystem() {
return fs.getFileSystem();
} | 3.68 |
framework_AbstractInMemoryContainer_fireItemsAdded | /**
* Notify item set change listeners that items have been added to the
* container.
*
* @param firstPosition
* position of the first visible added item in the view
* @param firstItemId
* id of the first visible added item
* @param numberOfItems
* the number of visible added items
*/
protected void fireItemsAdded(int firstPosition, ITEMIDTYPE firstItemId,
int numberOfItems) {
BaseItemAddEvent addEvent = new BaseItemAddEvent(this, firstItemId,
firstPosition, numberOfItems);
fireItemSetChange(addEvent);
} | 3.68 |
framework_VTabsheet_getRpcProxy | /**
* Returns the client to server RPC proxy for the tabsheet.
*
* @since 7.2
* @return RPC proxy
*/
protected TabsheetServerRpc getRpcProxy() {
return connector.getRpcProxy(TabsheetServerRpc.class);
} | 3.68 |
hbase_IdentityTableMap_map | /**
* Pass the key, value to reduce
*/
public void map(ImmutableBytesWritable key, Result value,
OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
// convert
output.collect(key, value);
} | 3.68 |
zxing_AlignmentPatternFinder_crossCheckVertical | /**
* <p>After a horizontal scan finds a potential alignment pattern, this method
* "cross-checks" by scanning down vertically through the center of the possible
* alignment pattern to see if the same proportion is detected.</p>
*
* @param startI row where an alignment pattern was detected
* @param centerJ center of the section that appears to cross an alignment pattern
* @param maxCount maximum reasonable number of modules that should be
* observed in any reading state, based on the results of the horizontal scan
* @return vertical center of alignment pattern, or {@link Float#NaN} if not found
*/
private float crossCheckVertical(int startI, int centerJ, int maxCount,
int originalStateCountTotal) {
BitMatrix image = this.image;
int maxI = image.getHeight();
int[] stateCount = crossCheckStateCount;
stateCount[0] = 0;
stateCount[1] = 0;
stateCount[2] = 0;
// Start counting up from center
int i = startI;
while (i >= 0 && image.get(centerJ, i) && stateCount[1] <= maxCount) {
stateCount[1]++;
i--;
}
// If already too many modules in this state or ran off the edge:
if (i < 0 || stateCount[1] > maxCount) {
return Float.NaN;
}
while (i >= 0 && !image.get(centerJ, i) && stateCount[0] <= maxCount) {
stateCount[0]++;
i--;
}
if (stateCount[0] > maxCount) {
return Float.NaN;
}
// Now also count down from center
i = startI + 1;
while (i < maxI && image.get(centerJ, i) && stateCount[1] <= maxCount) {
stateCount[1]++;
i++;
}
if (i == maxI || stateCount[1] > maxCount) {
return Float.NaN;
}
while (i < maxI && !image.get(centerJ, i) && stateCount[2] <= maxCount) {
stateCount[2]++;
i++;
}
if (stateCount[2] > maxCount) {
return Float.NaN;
}
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2];
if (5 * Math.abs(stateCountTotal - originalStateCountTotal) >= 2 * originalStateCountTotal) {
return Float.NaN;
}
return foundPatternCross(stateCount) ? centerFromEnd(stateCount, i) : Float.NaN;
} | 3.68 |
hadoop_AlwaysRestartPolicy_hasCompleted | /**
* This is always false since these components never terminate.
*
* @param component the component to check
* @return always {@code false}
*/
@Override public boolean hasCompleted(Component component) {
return false;
} | 3.68 |
framework_PopupDateField_isTextFieldEnabled | /**
* Checks whether the text field is enabled (default) or not.
*
* @see PopupDateField#setTextFieldEnabled(boolean);
*
* @return <b>true</b> if the text field is enabled, <b>false</b> otherwise.
*/
public boolean isTextFieldEnabled() {
return getState(false).textFieldEnabled;
} | 3.68 |
framework_TableMoveFocusWithSelection_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 12540;
} | 3.68 |
flink_ExceptionUtils_checkInterrupted | /**
* Checks whether the given exception is an {@link InterruptedException} and sets the interrupted
* flag accordingly.
*
* @param e to check whether it is an {@link InterruptedException}
*/
public static void checkInterrupted(Throwable e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
} | 3.68 |
flink_SymbolUtil_calciteToCommon | /**
* Converts from Calcite to a common symbol. The common symbol can be a publicly exposed one
* such as {@link TimeIntervalUnit} or internal one such as {@link DateTimeUtils.TimeUnitRange}.
* Since the common symbol is optional, the input is returned as a fallback.
*/
public static Enum<?> calciteToCommon(Enum<?> calciteSymbol, boolean preferInternal) {
checkCalciteSymbol(calciteSymbol);
Enum<?> internalCommonSymbol =
preferInternal ? calciteToInternalCommon.get(calciteSymbol) : null;
if (internalCommonSymbol == null) {
internalCommonSymbol = calciteToCommon.get(calciteSymbol);
if (internalCommonSymbol == null) {
// for cases that have no common representation
// e.g. TRIM
return calciteSymbol;
}
}
return internalCommonSymbol;
} | 3.68 |
morf_NamedParameterPreparedStatement_getParsedSql | /**
* For testing only.
*
* @return the parsed SQL.
*/
public String getParsedSql() {
return query;
} | 3.68 |
hadoop_AppStoreController_get | /**
* Find yarn application from solr.
*
* @param id Application ID
* @return AppEntry
*/
@GET
@Path("get/{id}")
@Produces(MediaType.APPLICATION_JSON)
public AppStoreEntry get(@PathParam("id") String id) {
AppCatalogSolrClient sc = new AppCatalogSolrClient();
return sc.findAppStoreEntry(id);
} | 3.68 |
flink_GSRecoverableWriterCommitter_cleanupTemporaryBlobs | /**
* Clean up after a successful commit operation, by deleting any temporary blobs associated with
* the final blob.
*/
private void cleanupTemporaryBlobs() {
LOGGER.trace(
"Cleaning up temporary blobs for recoverable with options {}: {}",
options,
recoverable);
// determine the partial name for the temporary objects to be deleted
String temporaryBucketName =
BlobUtils.getTemporaryBucketName(recoverable.finalBlobIdentifier, options);
String temporaryObjectPartialName =
BlobUtils.getTemporaryObjectPartialName(recoverable.finalBlobIdentifier);
// find all the temp blobs by looking for anything that starts with the temporary
// object partial name. doing it this way finds any orphaned temp blobs as well
List<GSBlobIdentifier> foundTempBlobIdentifiers =
storage.list(temporaryBucketName, temporaryObjectPartialName);
if (!foundTempBlobIdentifiers.isEmpty()) {
// delete all the temp blobs, and populate the set with ones that were actually deleted
// normalize in case the blob came back with a generation populated
storage.delete(foundTempBlobIdentifiers);
}
} | 3.68 |
pulsar_BrokerStatsBase_getTopics2 | // map support missing
@ApiResponses(value = { @ApiResponse(code = 403, message = "Don't have admin permission") })
public StreamingOutput getTopics2() throws Exception {
// Ensure super user access only
validateSuperUserAccess();
return output -> pulsar().getBrokerService().getDimensionMetrics(statsBuf -> {
try {
output.write(statsBuf.array(), statsBuf.arrayOffset(), statsBuf.readableBytes());
} catch (Exception e) {
throw new WebApplicationException(e);
}
});
}
@GET
@Path("/allocator-stats/{allocator} | 3.68 |
flink_RemoteInputChannel_releaseAllResources | /** Releases all exclusive and floating buffers, closes the partition request client. */
@Override
void releaseAllResources() throws IOException {
if (isReleased.compareAndSet(false, true)) {
final ArrayDeque<Buffer> releasedBuffers;
synchronized (receivedBuffers) {
releasedBuffers =
receivedBuffers.stream()
.map(sb -> sb.buffer)
.collect(Collectors.toCollection(ArrayDeque::new));
receivedBuffers.clear();
}
bufferManager.releaseAllBuffers(releasedBuffers);
// The released flag has to be set before closing the connection to ensure that
// buffers received concurrently with closing are properly recycled.
if (partitionRequestClient != null) {
partitionRequestClient.close(this);
} else {
connectionManager.closeOpenChannelConnections(connectionId);
}
}
} | 3.68 |
AreaShop_RentRegion_rent | /**
* Rent a region.
* @param offlinePlayer The player that wants to rent the region
* @return true if it succeeded and false if not
*/
public boolean rent(OfflinePlayer offlinePlayer) {
if(plugin.getEconomy() == null) {
message(offlinePlayer, "general-noEconomy");
return false;
}
// Check if the player has permission
if(!plugin.hasPermission(offlinePlayer, "areashop.rent")) {
message(offlinePlayer, "rent-noPermission");
return false;
}
// Check location restrictions
if(getWorld() == null) {
message(offlinePlayer, "general-noWorld");
return false;
}
if(getRegion() == null) {
message(offlinePlayer, "general-noRegion");
return false;
}
boolean extend = false;
if(getRenter() != null && offlinePlayer.getUniqueId().equals(getRenter())) {
extend = true;
}
// Check if available or extending
if (isRented() && !extend) {
message(offlinePlayer, "rent-someoneElse");
return false;
}
// These checks are only relevant for online players doing the renting/buying themselves
Player player = offlinePlayer.getPlayer();
if(player != null) {
// Check if the players needs to be in the region for renting
if(restrictedToRegion() && (!player.getWorld().getName().equals(getWorldName())
|| !getRegion().contains(player.getLocation().getBlockX(), player.getLocation().getBlockY(), player.getLocation().getBlockZ()))) {
message(offlinePlayer, "rent-restrictedToRegion");
return false;
}
// Check if the players needs to be in the world for renting
if(restrictedToWorld() && !player.getWorld().getName().equals(getWorldName())) {
message(offlinePlayer, "rent-restrictedToWorld", player.getWorld().getName());
return false;
}
}
// Check region limits if this is not extending
if(!(extend && config.getBoolean("allowRegionExtendsWhenAboveLimits"))) {
LimitResult limitResult;
if(extend) {
limitResult = this.limitsAllow(RegionType.RENT, offlinePlayer, true);
} else {
limitResult = this.limitsAllow(RegionType.RENT, offlinePlayer);
}
AreaShop.debug("LimitResult: " + limitResult.toString());
if(!limitResult.actionAllowed()) {
if(limitResult.getLimitingFactor() == LimitType.TOTAL) {
message(offlinePlayer, "total-maximum", limitResult.getMaximum(), limitResult.getCurrent(), limitResult.getLimitingGroup());
return false;
}
if(limitResult.getLimitingFactor() == LimitType.RENTS) {
message(offlinePlayer, "rent-maximum", limitResult.getMaximum(), limitResult.getCurrent(), limitResult.getLimitingGroup());
return false;
}
if(limitResult.getLimitingFactor() == LimitType.EXTEND) {
message(offlinePlayer, "rent-maximumExtend", limitResult.getMaximum(), limitResult.getCurrent() + 1, limitResult.getLimitingGroup());
return false;
}
return false;
}
}
// Check if the player can still extend this rent
if(extend && !plugin.hasPermission(offlinePlayer, "areashop.rentextendbypass")) {
if(getMaxExtends() >= 0 && getTimesExtended() >= getMaxExtends()) {
message(offlinePlayer, "rent-maxExtends");
return false;
}
}
// Check if there is enough time left before hitting maxRentTime
boolean extendToMax = false;
double price = getPrice();
long timeNow = Calendar.getInstance().getTimeInMillis();
long timeRented = 0;
long maxRentTime = getMaxRentTime();
if(isRented()) {
timeRented = getRentedUntil() - timeNow;
}
if((timeRented + getDuration()) > (maxRentTime)
&& !plugin.hasPermission(offlinePlayer, "areashop.renttimebypass")
&& maxRentTime != -1) {
// Extend to the maximum instead of adding a full period
if(getBooleanSetting("rent.extendToFullWhenAboveMaxRentTime")) {
if(timeRented >= maxRentTime) {
message(offlinePlayer, "rent-alreadyAtFull");
return false;
} else {
long toRentPart = maxRentTime - timeRented;
extendToMax = true;
price = ((double)toRentPart) / getDuration() * price;
}
} else {
message(offlinePlayer, "rent-maxRentTime");
return false;
}
}
// Check if the player has enough money
if(!plugin.getEconomy().has(offlinePlayer, getWorldName(), price)) {
if(extend) {
message(offlinePlayer, "rent-lowMoneyExtend", Utils.formatCurrency(plugin.getEconomy().getBalance(offlinePlayer, getWorldName())));
} else {
message(offlinePlayer, "rent-lowMoneyRent", Utils.formatCurrency(plugin.getEconomy().getBalance(offlinePlayer, getWorldName())));
}
return false;
}
// Broadcast and check event
RentingRegionEvent event = new RentingRegionEvent(this, offlinePlayer, extend);
Bukkit.getPluginManager().callEvent(event);
if(event.isCancelled()) {
message(offlinePlayer, "general-cancelled", event.getReason());
return false;
}
// Subtract the money from the player's balance
EconomyResponse r = plugin.getEconomy().withdrawPlayer(offlinePlayer, getWorldName(), price);
if(!r.transactionSuccess()) {
message(offlinePlayer, "rent-payError");
AreaShop.debug("Something went wrong with getting money from " + offlinePlayer.getName() + " while renting " + getName() + ": " + r.errorMessage);
return false;
}
// Optionally give money to the landlord
OfflinePlayer landlordPlayer = null;
if(getLandlord() != null) {
landlordPlayer = Bukkit.getOfflinePlayer(getLandlord());
}
String landlordName = getLandlordName();
if(landlordName != null) {
if(landlordPlayer != null && landlordPlayer.getName() != null) {
r = plugin.getEconomy().depositPlayer(landlordPlayer, getWorldName(), price);
} else {
r = plugin.getEconomy().depositPlayer(landlordName, getWorldName(), price);
}
if(r == null || !r.transactionSuccess()) {
AreaShop.warn("Something went wrong with paying '" + landlordName + "' " + Utils.formatCurrency(price) + " for his rent of region " + getName() + " to " + offlinePlayer.getName());
}
}
// Get the time until the region will be rented
Calendar calendar = Calendar.getInstance();
if(extendToMax) {
calendar.setTimeInMillis(calendar.getTimeInMillis() + getMaxRentTime());
} else if(extend) {
calendar.setTimeInMillis(getRentedUntil() + getDuration());
} else {
calendar.setTimeInMillis(calendar.getTimeInMillis() + getDuration());
}
// Add values to the rent and send it to FileManager
setRentedUntil(calendar.getTimeInMillis());
setRenter(offlinePlayer.getUniqueId());
updateLastActiveTime();
// Fire schematic event and updated times extended
if(!extend) {
this.handleSchematicEvent(RegionEvent.RENTED);
setTimesExtended(0);
} else {
setTimesExtended(getTimesExtended() + 1);
}
// Send message to the player
if(extendToMax) {
message(offlinePlayer, "rent-extendedToMax");
} else if(extend) {
message(offlinePlayer, "rent-extended");
} else {
message(offlinePlayer, "rent-rented");
}
// Notify about updates
this.notifyAndUpdate(new RentedRegionEvent(this, extend));
return true;
} | 3.68 |
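A quick worked example of the extend-to-full proration in the block above, using hypothetical numbers rather than anything from AreaShop's configuration:

    // duration 24h, maxRentTime 36h, 30h already rented, full-period price 100.0
    long duration = 24 * 60 * 60 * 1000L;        // getDuration()
    long maxRentTime = 36 * 60 * 60 * 1000L;     // getMaxRentTime()
    long timeRented = 30 * 60 * 60 * 1000L;      // getRentedUntil() - now
    long toRentPart = maxRentTime - timeRented;  // 6h left before hitting the cap
    double price = ((double) toRentPart) / duration * 100.0; // 6/24 * 100 = 25.0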
pulsar_BrokerDiscoveryProvider_nextBroker | /**
 * Find the next broker {@link LoadManagerReport} in round-robin fashion.
 *
 * @return the load report of the next available broker
 * @throws PulsarServerException if no active broker is available
*/
LoadManagerReport nextBroker() throws PulsarServerException {
List<LoadManagerReport> availableBrokers = metadataStoreCacheLoader.getAvailableBrokers();
if (availableBrokers.isEmpty()) {
throw new PulsarServerException("No active broker is available");
} else {
int brokersCount = availableBrokers.size();
int nextIdx = signSafeMod(counter.getAndIncrement(), brokersCount);
return availableBrokers.get(nextIdx);
}
} | 3.68 |
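The round-robin index relies on signSafeMod to stay non-negative once the atomic counter overflows; a minimal sketch of that helper under the usual sign-safe modulo idiom (the actual Pulsar utility may differ):

    // Sign-safe modulo: always returns a value in [0, divisor), even for negative counters
    static int signSafeMod(long dividend, int divisor) {
        int mod = (int) (dividend % divisor);
        return mod < 0 ? mod + divisor : mod;
    }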
hadoop_AbfsOutputStream_hasCapability | /**
* Query the stream for a specific capability.
*
* @param capability string to query the stream support for.
* @return true for hsync and hflush.
*/
@Override
public boolean hasCapability(String capability) {
return supportFlush && isProbeForSyncable(capability);
} | 3.68 |
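A plausible shape for the isProbeForSyncable helper referenced above, assuming it simply matches the standard org.apache.hadoop.fs.StreamCapabilities constants (an assumption, not the verified ABFS source):

    // Hypothetical helper: true when the caller probes for hsync or hflush support
    private static boolean isProbeForSyncable(String capability) {
        return capability.equalsIgnoreCase(StreamCapabilities.HSYNC)
            || capability.equalsIgnoreCase(StreamCapabilities.HFLUSH);
    }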
hbase_ZKProcedureUtil_isAbortPathNode | /**
* Is this in the procedure barrier abort znode path
*/
public boolean isAbortPathNode(String path) {
return path.startsWith(this.abortZnode) && !path.equals(abortZnode);
} | 3.68 |
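For illustration, with a hypothetical abortZnode of "/hbase/online-snapshot/abort" the prefix check behaves as follows:

    // isAbortPathNode("/hbase/online-snapshot/abort/snapshot-1") -> true  (child of the abort znode)
    // isAbortPathNode("/hbase/online-snapshot/abort")            -> false (the abort znode itself)
    // isAbortPathNode("/hbase/online-snapshot/acquired/snap-1")  -> false (different subtree)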
morf_JdbcUrlElements_withInstanceName | /**
* Sets the instance name. Defaults to null (no instance specified).
*
* @param instanceName The instance name.
* @return this
*/
public Builder withInstanceName(String instanceName) {
this.instanceName = instanceName;
return this;
} | 3.68 |
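A hedged usage sketch of the builder; only withInstanceName comes from the snippet above, while the other method names (forDatabaseType, withHost, build) are assumptions about the surrounding JdbcUrlElements API:

    JdbcUrlElements elements = JdbcUrlElements.forDatabaseType("SQL_SERVER")
        .withHost("dbhost")
        .withInstanceName("SQLEXPRESS")  // connect to a named instance instead of the default
        .build();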
morf_AbstractSqlDialectTest_testAlterColumnFromNotNullableToNullable | /**
* Test changing a column from not nullable to a nullable one.
*/
@Test
public void testAlterColumnFromNotNullableToNullable() {
testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, FLOAT_FIELD), column(FLOAT_FIELD, DataType.DECIMAL, 20, 3).nullable(), expectedAlterTableAlterColumnFromNotNullableToNullableStatement());
} | 3.68 |
hadoop_BufferData_getBuffer | /**
* Gets the buffer associated with this block.
*
* @return the buffer associated with this block.
*/
public ByteBuffer getBuffer() {
return this.buffer;
} | 3.68 |
querydsl_GenericExporter_setHandleMethods | /**
* Set whether methods are handled (default true)
*
 * @param b true to inspect methods when discovering properties, false to skip them
* @deprecated Use {@link #setPropertyHandling(PropertyHandling)} instead
*/
@Deprecated
public void setHandleMethods(boolean b) {
handleMethods = b;
setPropertyHandling();
} | 3.68 |
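Given the deprecation, the migration hinted at by the Javadoc would look roughly like this (the PropertyHandling constant name is an assumption):

    GenericExporter exporter = new GenericExporter();
    // old, deprecated: exporter.setHandleMethods(false);
    // new style: restrict property discovery to fields only (assumed enum constant)
    exporter.setPropertyHandling(PropertyHandling.FIELDS);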
hbase_HRegion_openHRegion | /**
* Open HRegion.
* <p/>
* Calls initialize and sets sequenceId.
* @return Returns <code>this</code>
*/
private HRegion openHRegion(final CancelableProgressable reporter) throws IOException {
try {
CompoundConfiguration cConfig =
new CompoundConfiguration().add(conf).addBytesMap(htableDescriptor.getValues());
// Refuse to open the region if we are missing local compression support
TableDescriptorChecker.checkCompression(cConfig, htableDescriptor);
// Refuse to open the region if encryption configuration is incorrect or
// codec support is missing
LOG.debug("checking encryption for " + this.getRegionInfo().getEncodedName());
TableDescriptorChecker.checkEncryption(cConfig, htableDescriptor);
// Refuse to open the region if a required class cannot be loaded
LOG.debug("checking classloading for " + this.getRegionInfo().getEncodedName());
TableDescriptorChecker.checkClassLoading(cConfig, htableDescriptor);
this.openSeqNum = initialize(reporter);
this.mvcc.advanceTo(openSeqNum);
// The openSeqNum must be increased every time when a region is assigned, as we rely on it to
// determine whether a region has been successfully reopened. So here we always write open
// marker, even if the table is read only.
if (
wal != null && getRegionServerServices() != null
&& RegionReplicaUtil.isDefaultReplica(getRegionInfo())
) {
writeRegionOpenMarker(wal, openSeqNum);
}
} catch (Throwable t) {
      // If opening fails (e.g. via the coprocessor path opening the wrong region),
      // MetricsRegionWrapperImpl has already been initialized but would never be closed,
      // so close the region here when open fails
try {
// It is not required to write sequence id file when region open is failed.
// Passing true to skip the sequence id file write.
this.close(true);
} catch (Throwable e) {
LOG.warn("Open region: {} failed. Try close region but got exception ",
this.getRegionInfo(), e);
}
throw t;
}
return this;
} | 3.68 |
hbase_MasterServices_modifyTable | /**
* Modify the descriptor of an existing table
* @param tableName The table name
* @param descriptor The updated table descriptor
*/
default long modifyTable(final TableName tableName, final TableDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
return modifyTable(tableName, descriptor, nonceGroup, nonce, true);
} | 3.68 |
pulsar_Topics_createNonPartitionedTopicAsync | /**
* Create a non-partitioned topic asynchronously.
*
* @param topic Topic name
*/
default CompletableFuture<Void> createNonPartitionedTopicAsync(String topic) {
return createNonPartitionedTopicAsync(topic, null);
} | 3.68 |
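A typical non-blocking call through the admin client might look like this (the topic name is illustrative and an already-configured PulsarAdmin instance is assumed):

    // "admin" is an existing PulsarAdmin client
    admin.topics().createNonPartitionedTopicAsync("persistent://public/default/my-topic")
        .thenRun(() -> System.out.println("topic created"))
        .exceptionally(ex -> { ex.printStackTrace(); return null; });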
graphhopper_VectorTileEncoder_addFeature | /**
* Add a feature with layer name (typically feature type name), some attributes
* and a Geometry. The Geometry must be in "pixel" space 0,0 upper left and
* 256,256 lower right.
* <p>
* For optimization, geometries will be clipped and simplified. Features with
* geometries outside of the tile will be skipped.
*
* @param layerName a {@link String} with the vector tile layer name.
* @param attributes a {@link Map} with the vector tile feature attributes.
* @param geometry a {@link Geometry} for the vector tile feature.
* @param id a long with the vector tile feature id field.
*/
public void addFeature(String layerName, Map<String, ?> attributes, Geometry geometry, long id) {
// skip small Polygon/LineString.
if (geometry instanceof MultiPolygon && geometry.getArea() < minimumArea) {
return;
}
if (geometry instanceof Polygon && geometry.getArea() < minimumArea) {
return;
}
if (geometry instanceof LineString && geometry.getLength() < minimumLength) {
return;
}
// special handling of GeometryCollection. subclasses are not handled here.
if (geometry.getClass().equals(GeometryCollection.class)) {
for (int i = 0; i < geometry.getNumGeometries(); i++) {
Geometry subGeometry = geometry.getGeometryN(i);
// keeping the id. any better suggestion?
addFeature(layerName, attributes, subGeometry, id);
}
return;
}
// About to simplify and clip. Looks like simplification before clipping is
// faster than clipping before simplification
// simplify non-points
if (simplificationDistanceTolerance > 0.0 && !(geometry instanceof Point)) {
if (geometry instanceof LineString || geometry instanceof MultiLineString) {
geometry = DouglasPeuckerSimplifier.simplify(geometry, simplificationDistanceTolerance);
} else if (geometry instanceof Polygon || geometry instanceof MultiPolygon) {
Geometry simplified = DouglasPeuckerSimplifier.simplify(geometry, simplificationDistanceTolerance);
// extra check to prevent polygon converted to line
if (simplified instanceof Polygon || simplified instanceof MultiPolygon) {
geometry = simplified;
} else {
geometry = TopologyPreservingSimplifier.simplify(geometry, simplificationDistanceTolerance);
}
} else {
geometry = TopologyPreservingSimplifier.simplify(geometry, simplificationDistanceTolerance);
}
}
// clip geometry
if (geometry instanceof Point) {
if (!clipCovers(geometry)) {
return;
}
} else {
geometry = clipGeometry(geometry);
}
// no need to add empty geometry
if (geometry == null || geometry.isEmpty()) {
return;
}
Layer layer = layers.get(layerName);
if (layer == null) {
layer = new Layer();
layers.put(layerName, layer);
}
Feature feature = new Feature();
feature.geometry = geometry;
feature.id = id;
this.autoincrement = Math.max(this.autoincrement, id + 1);
for (Map.Entry<String, ?> e : attributes.entrySet()) {
// skip attribute without value
if (e.getValue() == null) {
continue;
}
feature.tags.add(layer.key(e.getKey()));
feature.tags.add(layer.value(e.getValue()));
}
layer.features.add(feature);
} | 3.68 |
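A minimal usage sketch with JTS; addFeature matches the signature above, while the no-argument constructor and the encode() serialization method are assumptions about the rest of the encoder API:

    GeometryFactory gf = new GeometryFactory();
    VectorTileEncoder encoder = new VectorTileEncoder();

    // Coordinates are in tile-pixel space: 0,0 upper left, 256,256 lower right
    Point poi = gf.createPoint(new Coordinate(128, 128));
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("name", "centre");

    encoder.addFeature("poi", attributes, poi, 1L);
    byte[] tile = encoder.encode(); // assumed serialization entry point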
hadoop_Sets_newHashSetWithExpectedSize | /**
* Returns a new hash set using the smallest initial table size that can hold
* {@code expectedSize} elements without resizing. Note that this is not what
* {@link HashSet#HashSet(int)} does, but it is what most users want and
* expect it to do.
*
* <p>This behavior can't be broadly guaranteed, but has been tested with
* OpenJDK 1.7 and 1.8.</p>
*
* @param expectedSize the number of elements you expect to add to the
* returned set
* @param <E> Generics Type E.
* @return a new, empty hash set with enough capacity to hold
* {@code expectedSize} elements without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) {
return new HashSet<E>(capacity(expectedSize));
} | 3.68 |
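The capacity(...) helper is not shown here; a sketch of the Guava-style sizing logic it is presumably modelled on:

    // Smallest initial table size that holds expectedSize entries at the default 0.75 load factor
    private static int capacity(int expectedSize) {
        if (expectedSize < 0) {
            throw new IllegalArgumentException("expectedSize cannot be negative: " + expectedSize);
        }
        if (expectedSize < 3) {
            return expectedSize + 1;
        }
        if (expectedSize < (1 << 30)) {
            return (int) ((float) expectedSize / 0.75F + 1.0F);
        }
        return Integer.MAX_VALUE; // any sufficiently large value
    }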
hadoop_RequestFactoryImpl_builder | /**
* Create a builder.
* @return new builder.
*/
public static RequestFactoryBuilder builder() {
return new RequestFactoryBuilder();
} | 3.68 |
hudi_HoodieTable_getCompletedCommitsTimeline | /**
* Get only the completed (no-inflights) commit + deltacommit timeline.
*/
public HoodieTimeline getCompletedCommitsTimeline() {
return metaClient.getCommitsTimeline().filterCompletedInstants();
} | 3.68 |
hudi_WriteStatus_markSuccess | /**
* Used by native write handles like HoodieRowCreateHandle and HoodieRowDataCreateHandle.
*
* @see WriteStatus#markSuccess(HoodieRecord, Option)
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public void markSuccess(HoodieRecordDelegate recordDelegate, Option<Map<String, String>> optionalRecordMetadata) {
if (trackSuccessRecords) {
writtenRecordDelegates.add(Objects.requireNonNull(recordDelegate));
}
updateStatsForSuccess(optionalRecordMetadata);
} | 3.68 |
hbase_Procedure_bypass | /**
* Set the bypass to true. Only called in
* {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. DO NOT use
* this method alone, since we can't just bypass one single procedure. We need to bypass its
* ancestor too. If your Procedure has set state, it needs to undo it in here.
* @param env Current environment. May be null because of context; e.g. pretty-printing procedure
* WALs where there is no 'environment' (and where Procedures that require an
 *            'environment' won't be run).
*/
protected void bypass(TEnvironment env) {
this.bypass = true;
} | 3.68 |
querydsl_JTSGeometryExpression_contains | /**
* Returns 1 (TRUE) if this geometric object “spatially contains” anotherGeometry.
*
* @param geometry other geometry
* @return true, if contains
*/
public BooleanExpression contains(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.CONTAINS, mixin, geometry);
} | 3.68 |
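An illustrative query using the predicate; the Shop entity, its generated QShop type, the "area" geometry path, and the raw-Geometry overload of contains are all assumptions for the sake of the example:

    QShop shop = QShop.shop; // hypothetical generated query type with a JTS geometry path
    Point customerLocation = geometryFactory.createPoint(new Coordinate(5.12, 52.09));
    List<Shop> shopsCoveringCustomer = queryFactory
        .selectFrom(shop)
        .where(shop.area.contains(customerLocation)) // SpatialOps.CONTAINS under the hood
        .fetch();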