name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AbfsHttpOperation_toString | // Returns a trace message for the request
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(statusCode);
sb.append(",");
sb.append(storageErrorCode);
sb.append(",");
sb.append(expectedAppendPos);
sb.append(",cid=");
sb.append(getClientRequestId());
sb.append(",rid=");
sb.append(requestId);
sb.append(",connMs=");
sb.append(connectionTimeMs);
sb.append(",sendMs=");
sb.append(sendRequestTimeMs);
sb.append(",recvMs=");
sb.append(recvResponseTimeMs);
sb.append(",sent=");
sb.append(bytesSent);
sb.append(",recv=");
sb.append(bytesReceived);
sb.append(",");
sb.append(method);
sb.append(",");
sb.append(getMaskedUrl());
return sb.toString();
} | 3.68 |
flink_FileMergingSnapshotManagerBuilder_build | /**
* Create file-merging snapshot manager based on configuration.
*
* <p>TODO (FLINK-32074): Support another type of FileMergingSnapshotManager that merges files
* across different checkpoints.
*
* @return the created manager.
*/
public FileMergingSnapshotManager build() {
return new WithinCheckpointFileMergingSnapshotManager(
id, ioExecutor == null ? Runnable::run : ioExecutor);
} | 3.68 |
hadoop_AzureADAuthenticator_getTokenFromMsi | /**
* Gets AAD token from the local virtual machine's VM extension. This only works on
* an Azure VM with MSI extension
* enabled.
*
* @param authEndpoint the OAuth 2.0 token endpoint associated
* with the user's directory (obtain from
* Active Directory configuration)
* @param tenantGuid (optional) The guid of the AAD tenant. Can be {@code null}.
* @param clientId (optional) The clientId guid of the MSI service
* principal to use. Can be {@code null}.
 * @param bypassCache {@code boolean} specifying whether a cached token is acceptable or a fresh token
 *                    request should be made to AAD
* @return {@link AzureADToken} obtained using the creds
* @throws IOException throws IOException if there is a failure in obtaining the token
*/
public static AzureADToken getTokenFromMsi(final String authEndpoint,
final String tenantGuid, final String clientId, String authority,
boolean bypassCache) throws IOException {
QueryParams qp = new QueryParams();
qp.add("api-version", "2018-02-01");
qp.add("resource", RESOURCE_NAME);
if (tenantGuid != null && tenantGuid.length() > 0) {
authority = authority + tenantGuid;
LOG.debug("MSI authority : {}", authority);
qp.add("authority", authority);
}
if (clientId != null && clientId.length() > 0) {
qp.add("client_id", clientId);
}
if (bypassCache) {
qp.add("bypass_cache", "true");
}
Hashtable<String, String> headers = new Hashtable<>();
headers.put("Metadata", "true");
LOG.debug("AADToken: starting to fetch token using MSI");
return getTokenCall(authEndpoint, qp.serialize(), headers, "GET", true);
} | 3.68 |
hudi_HoodieTableFactory_setupCompactionOptions | /**
* Sets up the compaction options from the table definition.
*/
private static void setupCompactionOptions(Configuration conf) {
int commitsToRetain = conf.getInteger(FlinkOptions.CLEAN_RETAIN_COMMITS);
int minCommitsToKeep = conf.getInteger(FlinkOptions.ARCHIVE_MIN_COMMITS);
if (commitsToRetain >= minCommitsToKeep) {
LOG.info("Table option [{}] is reset to {} to be greater than {}={},\n"
+ "to avoid risk of missing data from few instants in incremental pull",
FlinkOptions.ARCHIVE_MIN_COMMITS.key(), commitsToRetain + 10,
FlinkOptions.CLEAN_RETAIN_COMMITS.key(), commitsToRetain);
conf.setInteger(FlinkOptions.ARCHIVE_MIN_COMMITS, commitsToRetain + 10);
conf.setInteger(FlinkOptions.ARCHIVE_MAX_COMMITS, commitsToRetain + 20);
}
} | 3.68 |
flink_OperationManager_cancelOperation | /**
* Cancel the execution of the operation.
*
* @param operationHandle identifies the {@link Operation}.
*/
public void cancelOperation(OperationHandle operationHandle) {
getOperation(operationHandle).cancel();
} | 3.68 |
framework_VaadinFinderLocatorStrategy_getElementsByPath | /**
* {@inheritDoc}
*/
@Override
public List<Element> getElementsByPath(String path) {
List<SelectorPredicate> postFilters = SelectorPredicate
.extractPostFilterPredicates(path);
if (!postFilters.isEmpty()) {
path = path.substring(1, path.lastIndexOf(')'));
}
List<Element> elements = new ArrayList<>();
if (LocatorUtil.isNotificationElement(path)) {
for (VNotification n : findNotificationsByPath(path)) {
elements.add(n.getElement());
}
} else {
final UIConnector uiConnector = client.getUIConnector();
elements.addAll(
eliminateDuplicates(getElementsByPathStartingAtConnector(
path, uiConnector, Document.get().getBody())));
}
for (SelectorPredicate p : postFilters) {
            // Post filtering supports only indexes and follows the instruction
            // blindly. An index that is outside of our list results in an empty
            // list, and multiple indexes are likely to ruin a search completely.
if (p.getIndex() >= 0) {
if (p.getIndex() >= elements.size()) {
elements.clear();
} else {
Element e = elements.get(p.getIndex());
elements.clear();
elements.add(e);
}
}
}
return elements;
} | 3.68 |
hbase_UserPermission_getAccessScope | /**
* Get this permission access scope.
* @return access scope
*/
public Permission.Scope getAccessScope() {
return permission.getAccessScope();
} | 3.68 |
hadoop_TimelineEvents_setEntityType | /**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
} | 3.68 |
morf_SpreadsheetDataSetProducer_open | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.dataset.DataSetProducer#open()
*/
@Override
public void open() {
// Nothing to do
} | 3.68 |
framework_Calendar_getStartDate | /**
* Gets the calendar's start date.
*
* @return First visible date.
*/
public Date getStartDate() {
if (startDate == null) {
currentCalendar.set(java.util.Calendar.MILLISECOND, 0);
currentCalendar.set(java.util.Calendar.SECOND, 0);
currentCalendar.set(java.util.Calendar.MINUTE, 0);
currentCalendar.set(java.util.Calendar.HOUR_OF_DAY, 0);
currentCalendar.set(java.util.Calendar.DAY_OF_WEEK,
currentCalendar.getFirstDayOfWeek());
return currentCalendar.getTime();
}
return startDate;
} | 3.68 |
framework_WebBrowser_getBrowserMajorVersion | /**
* Gets the major version of the browser the user is using.
*
* <p>
* Note that Internet Explorer in IE7 compatibility mode might return 8 in
* some cases even though it should return 7.
* </p>
*
* @return The major version of the browser or -1 if not known.
*/
public int getBrowserMajorVersion() {
if (browserDetails == null) {
return -1;
}
return browserDetails.getBrowserMajorVersion();
} | 3.68 |
hbase_CachedMobFile_close | /**
* Decreases the reference of the underlying reader for the mob file. It's not thread-safe. Use
* MobFileCache.closeFile() instead. This underlying reader isn't closed until the reference is 0.
*/
@Override
public void close() throws IOException {
long refs = referenceCount.decrementAndGet();
if (refs == 0) {
super.close();
}
} | 3.68 |
pulsar_JvmUsage_populateFrom | /*
 * Factory method that returns a new instance of the class by populating it from metrics; we assume that
 * the metrics are JVM metrics.
 */
public static JvmUsage populateFrom(Map<String, Object> metrics) {
JvmUsage jvmUsage = null;
if (metrics.containsKey("jvm_thread_cnt")) {
jvmUsage = new JvmUsage();
jvmUsage.threadCount = Long.valueOf(metrics.get("jvm_thread_cnt").toString());
}
return jvmUsage;
} | 3.68 |
hadoop_TaskManifest_load | /**
* Load an instance from a file, then validate it.
* If loading through a listing; use this API so that filestatus
* hints can be used.
* @param serializer serializer.
* @param fs filesystem
* @param path path to load from
* @param status status of file to load
* @return the loaded instance
* @throws IOException IO failure/the data is invalid
*/
public static TaskManifest load(
JsonSerialization<TaskManifest> serializer,
FileSystem fs,
Path path,
FileStatus status)
throws IOException {
LOG.debug("Reading Manifest in file {}", path);
return serializer.load(fs, path, status)
.validate();
} | 3.68 |
hadoop_BlockBlobAppendStream_write | /**
* Writes length bytes from the specified byte array starting at offset to
* this output stream.
*
* @param data
* the byte array to write.
* @param offset
* the start offset in the data.
* @param length
* the number of bytes to write.
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
@Override
public synchronized void write(final byte[] data, int offset, int length)
throws IOException {
Preconditions.checkArgument(data != null, "null data");
if (offset < 0 || length < 0 || length > data.length - offset) {
throw new IndexOutOfBoundsException();
}
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
while (outBuffer.remaining() < length) {
int remaining = outBuffer.remaining();
outBuffer.put(data, offset, remaining);
// upload payload to azure storage
addBlockUploadCommand();
offset += remaining;
length -= remaining;
}
outBuffer.put(data, offset, length);
} | 3.68 |
hbase_Procedure_completionCleanup | /**
* Called when the procedure is marked as completed (success or rollback). The procedure
* implementor may use this method to cleanup in-memory states. This operation will not be retried
* on failure. If a procedure took a lock, it will have been released when this method runs.
*/
protected void completionCleanup(TEnvironment env) {
// no-op
} | 3.68 |
flink_ClusterEntrypointUtils_tryFindUserLibDirectory | /**
* Tries to find the user library directory.
*
 * @return the user library directory if it exists, returns {@link Optional#empty()} if there is
* none
*/
public static Optional<File> tryFindUserLibDirectory() {
final File flinkHomeDirectory = deriveFlinkHomeDirectoryFromLibDirectory();
final File usrLibDirectory =
new File(flinkHomeDirectory, ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR);
if (!usrLibDirectory.isDirectory()) {
return Optional.empty();
}
return Optional.of(usrLibDirectory);
} | 3.68 |
hadoop_AbstractTask_setTaskId | /**
* Set Task Id.
* @param taskId : Task Identifier
*/
@Override
public final void setTaskId(final TaskId taskId) {
if (taskId != null) {
this.taskID = taskId;
}
} | 3.68 |
hbase_RESTServlet_getTable | /**
* Caller closes the table afterwards.
*/
Table getTable(String tableName) throws IOException {
return connectionCache.getTable(tableName);
} | 3.68 |
hadoop_RenameOperation_getUploadsAborted | /**
* Get the count of uploads aborted.
* Non-empty iff enabled, and the operations completed without errors.
* @return count of aborted uploads.
*/
public Optional<Long> getUploadsAborted() {
return uploadsAborted;
} | 3.68 |
graphhopper_RamerDouglasPeucker_removeNaN | /**
* Fills all entries of the point list that are NaN with the subsequent values (and therefore shortens the list)
*/
static void removeNaN(PointList pointList) {
int curr = 0;
for (int i = 0; i < pointList.size(); i++) {
if (!Double.isNaN(pointList.getLat(i))) {
pointList.set(curr, pointList.getLat(i), pointList.getLon(i), pointList.getEle(i));
curr++;
}
}
pointList.trimToSize(curr);
} | 3.68 |
AreaShop_Utils_getSetAndDefaults | /**
* Get a string list from the config, combined with the entries specified in the default config.
* @param path The path to read the lists from
* @return List with all values defined in the config and the default config combined
*/
private static Set<String> getSetAndDefaults(String path) {
Set<String> result = new HashSet<>(config.getStringList(path));
ConfigurationSection defaults = config.getDefaults();
if(defaults != null) {
result.addAll(defaults.getStringList(path));
}
return result;
} | 3.68 |
hbase_MonitoredRPCHandlerImpl_isRPCRunning | /**
* Indicates to the client whether this task is monitoring a currently active RPC call.
* @return true if the monitored handler is currently servicing an RPC call.
*/
@Override
public boolean isRPCRunning() {
return getState() == State.RUNNING;
} | 3.68 |
flink_MethodlessRouter_removePathPattern | /** Removes the route specified by the path pattern. */
public void removePathPattern(String pathPattern) {
PathPattern p = new PathPattern(pathPattern);
T target = routes.remove(p);
if (target == null) {
return;
}
} | 3.68 |
framework_JsonDecoder_decodeValue | /**
* Decode a JSON array with two elements (type and value) into a client-side
* type, recursively if necessary.
*
* @param jsonValue
* JSON value with encoded data
* @param connection
* reference to the current ApplicationConnection
* @return decoded value (does not contain JSON types)
*/
public static Object decodeValue(Type type, JsonValue jsonValue,
Object target, ApplicationConnection connection) {
String baseTypeName = type.getBaseTypeName();
if (baseTypeName.startsWith("elemental.json.Json")) {
return jsonValue;
}
// Null is null, regardless of type (except JSON)
if (jsonValue.getType() == JsonType.NULL) {
return null;
}
if (Map.class.getName().equals(baseTypeName)
|| HashMap.class.getName().equals(baseTypeName)) {
return decodeMap(type, jsonValue, connection);
} else if (List.class.getName().equals(baseTypeName)
|| ArrayList.class.getName().equals(baseTypeName)) {
assert jsonValue.getType() == JsonType.ARRAY;
return decodeList(type, (JsonArray) jsonValue, connection);
} else if (Set.class.getName().equals(baseTypeName)) {
assert jsonValue.getType() == JsonType.ARRAY;
return decodeSet(type, (JsonArray) jsonValue, connection);
} else if (String.class.getName().equals(baseTypeName)) {
return jsonValue.asString();
} else if (Integer.class.getName().equals(baseTypeName)) {
return Integer.valueOf((int) jsonValue.asNumber());
} else if (Long.class.getName().equals(baseTypeName)) {
return Long.valueOf((long) jsonValue.asNumber());
} else if (Float.class.getName().equals(baseTypeName)) {
return Float.valueOf((float) jsonValue.asNumber());
} else if (Double.class.getName().equals(baseTypeName)) {
return Double.valueOf(jsonValue.asNumber());
} else if (Boolean.class.getName().equals(baseTypeName)) {
return Boolean.valueOf(jsonValue.asString());
} else if (Byte.class.getName().equals(baseTypeName)) {
return Byte.valueOf((byte) jsonValue.asNumber());
} else if (Character.class.getName().equals(baseTypeName)) {
return Character.valueOf(jsonValue.asString().charAt(0));
} else if (Connector.class.getName().equals(baseTypeName)) {
return ConnectorMap.get(connection)
.getConnector(jsonValue.asString());
} else {
return decodeObject(type, jsonValue, target, connection);
}
} | 3.68 |
flink_StreamExecutionEnvironment_generateSequence | /**
 * Creates a new data stream that contains a sequence of numbers. This is a parallel source; if
 * you manually set the parallelism to {@code 1} (using {@link
 * org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator#setParallelism(int)}),
 * the generated sequence of elements is in order.
*
* @param from The number to start at (inclusive)
* @param to The number to stop at (inclusive)
* @return A data stream, containing all number in the [from, to] interval
* @deprecated Use {@link #fromSequence(long, long)} instead to create a new data stream that
* contains {@link org.apache.flink.api.connector.source.lib.NumberSequenceSource}.
*/
@Deprecated
public DataStreamSource<Long> generateSequence(long from, long to) {
if (from > to) {
throw new IllegalArgumentException(
"Start of sequence must not be greater than the end");
}
return addSource(new StatefulSequenceSource(from, to), "Sequence Source (Deprecated)");
} | 3.68 |
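The deprecated `generateSequence` above points to `fromSequence(long, long)` as its replacement. Below is a minimal usage sketch, assuming a Flink 1.12+ `flink-streaming-java` dependency; the class and job names are illustrative only.

```java
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SequenceExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Replacement for the deprecated generateSequence(1, 1000):
        // emits every long in [1, 1000], backed by NumberSequenceSource.
        DataStreamSource<Long> numbers = env.fromSequence(1L, 1000L);
        numbers.print();
        env.execute("sequence-example");
    }
}
```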
hadoop_ResourceEstimatorService_deleteHistoryResourceSkyline | /**
* Delete history {@link ResourceSkyline}s from {@link SkylineStore}.
* <p> Note that for safety considerations, we only allow users to delete
* history {@link ResourceSkyline}s of one job run.
*
* @param pipelineId pipelineId of the history run.
* @param runId runId runId of the history run.
* @throws SkylineStoreException if fails to deleteHistory
* {@link ResourceSkyline}s.
*/
@DELETE @Path("/skylinestore/history/{pipelineId}/{runId}")
public void deleteHistoryResourceSkyline(
@PathParam("pipelineId") String pipelineId,
@PathParam("runId") String runId) throws SkylineStoreException {
RecurrenceId recurrenceId = new RecurrenceId(pipelineId, runId);
skylineStore.deleteHistory(recurrenceId);
LOGGER.info("Delete ResourceSkyline for recurrenceId: {}.", recurrenceId);
} | 3.68 |
morf_Deployment_writeStatements | /**
* Creates deployment statements using the supplied source meta data.
*
* @param targetSchema Schema that is to be deployed.
* @param sqlStatementWriter Recipient for the deployment statements.
*/
private void writeStatements(Schema targetSchema, SqlStatementWriter sqlStatementWriter) {
// Sort the tables by foreign key dependency order
// TODO Implement table sorting by dependency for deployment.
List<String> tableNames = new ArrayList<>(targetSchema.tableNames());
// Iterate through all the tables and deploy them
for (String tableName : tableNames) {
Table table = targetSchema.getTable(tableName);
sqlStatementWriter.writeSql(connectionResources.sqlDialect().tableDeploymentStatements(table));
}
Schema sourceSchema = UpgradeHelper.copySourceSchema(connectionResources, connectionResources.getDataSource(), new HashSet<>());
UpgradeSchemas upgradeSchemas = new UpgradeSchemas(sourceSchema, targetSchema);
ViewChanges viewChanges = new ViewChanges(targetSchema.views(), new HashSet<>(), targetSchema.views());
sqlStatementWriter.writeSql(UpgradeHelper.postSchemaUpgrade(upgradeSchemas,
viewChanges,
viewChangesDeploymentHelper));
} | 3.68 |
hbase_TableDescriptorBuilder_setNormalizerTargetRegionSize | /**
* Setting the target region size of table normalization.
* @param regionSize the target region size.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
return setValue(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long.toString(regionSize));
} | 3.68 |
graphhopper_Country_find | /**
* @param iso should be ISO 3166-1 alpha-2
*/
public static Country find(String iso) {
return ALPHA2_MAP.get(iso);
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_jsonifyKeyValueSchemaInfo | /**
* Jsonify the key/value schema info.
*
* @param kvSchemaInfo the key/value schema info
* @return the jsonified schema info
*/
public String jsonifyKeyValueSchemaInfo(KeyValue<SchemaInfo, SchemaInfo> kvSchemaInfo) {
return SchemaUtils.jsonifyKeyValueSchemaInfo(kvSchemaInfo);
} | 3.68 |
graphhopper_GTFSFeed_loadFromZipfileOrDirectory | /**
* The order in which we load the tables is important for two reasons.
* 1. We must load feed_info first so we know the feed ID before loading any other entities. This could be relaxed
* by having entities point to the feed object rather than its ID String.
* 2. Referenced entities must be loaded before any entities that reference them. This is because we check
* referential integrity while the files are being loaded. This is done on the fly during loading because it allows
* us to associate a line number with errors in objects that don't have any other clear identifier.
*
* Interestingly, all references are resolvable when tables are loaded in alphabetical order.
*/
public void loadFromZipfileOrDirectory(File zip, String fid) throws IOException {
if (this.loaded) throw new UnsupportedOperationException("Attempt to load GTFS into existing database");
new FeedInfo.Loader(this).loadTable(zip);
// maybe we should just point to the feed object itself instead of its ID, and null out its stoptimes map after loading
if (fid != null) {
feedId = fid;
LOG.info("Feed ID is undefined, pester maintainers to include a feed ID. Using file name {}.", feedId); // TODO log an error, ideally feeds should include a feedID
}
else if (feedId == null || feedId.isEmpty()) {
feedId = new File(zip.getName()).getName().replaceAll("\\.zip$", "");
LOG.info("Feed ID is undefined, pester maintainers to include a feed ID. Using file name {}.", feedId); // TODO log an error, ideally feeds should include a feedID
}
else {
LOG.info("Feed ID is '{}'.", feedId);
}
db.getAtomicString("feed_id").set(feedId);
new Agency.Loader(this).loadTable(zip);
if (agency.isEmpty()) {
errors.add(new GeneralError("agency", 0, "agency_id", "Need at least one agency."));
}
// calendars and calendar dates are joined into services. This means a lot of manipulating service objects as
// they are loaded; since mapdb keys/values are immutable, load them in memory then copy them to MapDB once
// we're done loading them
Map<String, Service> serviceTable = new HashMap<>();
new Calendar.Loader(this, serviceTable).loadTable(zip);
new CalendarDate.Loader(this, serviceTable).loadTable(zip);
this.services.putAll(serviceTable);
serviceTable = null; // free memory
// Same deal
Map<String, Fare> fares = new HashMap<>();
new FareAttribute.Loader(this, fares).loadTable(zip);
new FareRule.Loader(this, fares).loadTable(zip);
this.fares.putAll(fares);
fares = null; // free memory
new Route.Loader(this).loadTable(zip);
new ShapePoint.Loader(this).loadTable(zip);
new Stop.Loader(this).loadTable(zip);
new Transfer.Loader(this).loadTable(zip);
new Trip.Loader(this).loadTable(zip);
new Frequency.Loader(this).loadTable(zip);
new StopTime.Loader(this).loadTable(zip);
loaded = true;
} | 3.68 |
framework_CalendarConnector_getActionCaption | /**
* Get the text that is displayed for a context menu item.
*
* @param actionKey
* The unique action key
 * @return the text to display for the context menu item, or {@code null} if none is set
*/
public String getActionCaption(String actionKey) {
return actionMap.get(actionKey + "_c");
} | 3.68 |
framework_VContextMenu_showAt | /**
* Shows context menu at given location IF it contain at least one item.
*
* @param left
* @param top
*/
public void showAt(int left, int top) {
final Action[] actions = actionOwner.getActions();
if (actions == null || actions.length == 0) {
// Only show if there really are actions
return;
}
this.left = left;
this.top = top;
menu.clearItems();
for (final Action a : actions) {
menu.addItem(new MenuItem(a.getHTML(), true, a));
}
// Attach onload listeners to all images
WidgetUtil.sinkOnloadForImages(menu.getElement());
// Store the currently focused element, which will be re-focused when
// context menu is closed
focusedElement = WidgetUtil.getFocusedElement();
// reset height (if it has been previously set explicitly)
setHeight("");
setPopupPositionAndShow((offsetWidth, offsetHeight) -> {
// mac FF gets bad width due GWT popups overflow hacks,
// re-determine width
offsetWidth = menu.getOffsetWidth();
int menuLeft = VContextMenu.this.left;
int menuTop = VContextMenu.this.top;
if (offsetWidth + menuLeft > Window.getClientWidth()) {
menuLeft = menuLeft - offsetWidth;
if (menuLeft < 0) {
menuLeft = 0;
}
}
if (offsetHeight + menuTop > Window.getClientHeight()) {
menuTop = Math.max(0, Window.getClientHeight() - offsetHeight);
}
if (menuTop == 0) {
setHeight(Window.getClientHeight() + "px");
}
setPopupPosition(menuLeft, menuTop);
getElement().getStyle().setPosition(Style.Position.FIXED);
/*
* Move keyboard focus to menu, deferring the focus setting so the
* focus is certainly moved to the menu in all browser after the
* positioning has been done.
*/
Scheduler.get().scheduleDeferred(() -> {
// Focus the menu.
menu.setFocus(true);
// Unselect previously selected items
menu.selectItem(null);
});
});
} | 3.68 |
hbase_ProcedureMember_closeAndWait | /**
 * Shuts down the thread pool and waits for up to timeoutMs millis before bailing.
 * @param timeoutMs timeout limit in millis
 * @return true if shut down successfully, false if bailed due to timeout.
*/
boolean closeAndWait(long timeoutMs) throws InterruptedException {
pool.shutdown();
return pool.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS);
} | 3.68 |
hbase_CommonFSUtils_isExists | /**
* Calls fs.exists(). Checks if the specified path exists
* @param fs must not be null
* @param path must not be null
* @return the value returned by fs.exists()
* @throws IOException from underlying FileSystem
*/
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
return fs.exists(path);
} | 3.68 |
hudi_WriteProfiles_getCommitMetadata | /**
* Returns the commit metadata of the given instant.
*
* @param tableName The table name
* @param basePath The table base path
* @param instant The hoodie instant
* @param timeline The timeline
* @return the commit metadata
*/
public static HoodieCommitMetadata getCommitMetadata(
String tableName,
Path basePath,
HoodieInstant instant,
HoodieTimeline timeline) {
try {
return TimelineUtils.getCommitMetadata(instant, timeline);
} catch (IOException e) {
LOG.error("Get write metadata for table {} with instant {} and path: {} error",
tableName, instant.getTimestamp(), basePath);
throw new HoodieException(e);
}
} | 3.68 |
flink_FlinkContainersSettings_builder | /**
* A new builder for {@code FlinkContainersConfig}.
*
* @return The builder.
*/
public static Builder builder() {
return new Builder();
} | 3.68 |
hadoop_MRJobConfUtil_getTaskProgressReportInterval | /**
* Get the progress heartbeat interval configuration for mapreduce tasks.
* By default, the value of progress heartbeat interval is a proportion of
* that of task timeout.
* @param conf the job configuration to read from
* @return the value of task progress report interval
*/
public static long getTaskProgressReportInterval(final Configuration conf) {
long taskHeartbeatTimeOut = conf.getLong(
MRJobConfig.TASK_TIMEOUT, MRJobConfig.DEFAULT_TASK_TIMEOUT_MILLIS);
return conf.getLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL,
(long) (TASK_REPORT_INTERVAL_TO_TIMEOUT_RATIO * taskHeartbeatTimeOut));
} | 3.68 |
framework_VComboBox_updateRootWidth | /**
* Calculates the width of the select if the select has undefined width.
* Should be called when the width changes or when the icon changes.
* <p>
* For internal use only. May be removed or replaced in the future.
*/
public void updateRootWidth() {
debug("VComboBox: updateRootWidth()");
if (connector.isUndefinedWidth()) {
/*
 * When the select has an undefined width we need to check that we are
* only setting the text box width relative to the first page width
* of the items. If this is not done the text box width will change
* when the popup is used to view longer items than the text box is
* wide.
*/
int w = WidgetUtil.getRequiredWidth(this);
if (dataReceivedHandler.isWaitingForInitialData()
&& suggestionPopupMinWidth > w) {
/*
* We want to compensate for the paddings just to preserve the
* exact size as in Vaadin 6.x, but we get here before
* MeasuredSize has been initialized.
* Util.measureHorizontalPaddingAndBorder does not work with
* border-box, so we must do this the hard way.
*/
Style style = getElement().getStyle();
String originalPadding = style.getPadding();
String originalBorder = style.getBorderWidth();
style.setPaddingLeft(0, Unit.PX);
style.setBorderWidth(0, Unit.PX);
style.setProperty("padding", originalPadding);
style.setProperty("borderWidth", originalBorder);
// Use util.getRequiredWidth instead of getOffsetWidth here
int iconWidth = selectedItemIcon == null ? 0
: WidgetUtil.getRequiredWidth(selectedItemIcon);
int buttonWidth = popupOpener == null ? 0
: WidgetUtil.getRequiredWidth(popupOpener);
/*
* Instead of setting the width of the wrapper, set the width of
* the combobox. Subtract the width of the icon and the
* popupopener
*/
tb.setWidth(suggestionPopupMinWidth - iconWidth - buttonWidth
+ "px");
}
/*
* Lock the textbox width to its current value if it's not already
* locked. This can happen after setWidth("") which resets the
* textbox width to "100%".
*/
if (!tb.getElement().getStyle().getWidth().endsWith("px")) {
int iconWidth = selectedItemIcon == null ? 0
: selectedItemIcon.getOffsetWidth();
tb.setWidth(tb.getOffsetWidth() - iconWidth + "px");
}
}
} | 3.68 |
morf_MySqlDialect_getFromDummyTable | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getFromDummyTable()
*/
@Override
protected String getFromDummyTable() {
return " FROM dual";
} | 3.68 |
flink_Tuple20_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>
copy() {
return new Tuple20<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17, this.f18, this.f19);
} | 3.68 |
hudi_BaseHoodieTableServiceClient_inlineLogCompact | /**
* Performs a log compaction operation on a table, serially before or after an insert/upsert action.
*/
protected Option<String> inlineLogCompact(Option<Map<String, String>> extraMetadata) {
Option<String> logCompactionInstantTimeOpt = scheduleLogCompaction(extraMetadata);
logCompactionInstantTimeOpt.ifPresent(logCompactInstantTime -> {
// inline log compaction should auto commit as the user is never given control
logCompact(logCompactInstantTime, true);
});
return logCompactionInstantTimeOpt;
} | 3.68 |
flink_SlidingEventTimeWindows_of | /**
* Creates a new {@code SlidingEventTimeWindows} {@link WindowAssigner} that assigns elements to
* time windows based on the element timestamp and offset.
*
 * <p>For example, if you want to window a stream by hour, but the window begins at the 15th minute of
 * each hour, you can use {@code of(Time.hours(1), Time.minutes(15))}; then you will get time
 * windows that start at 0:15:00, 1:15:00, 2:15:00, etc.
 *
 * <p>Alternatively, if you live somewhere that does not use UTC±00:00 time, such as
 * China, which uses UTC+08:00, and you want a time window with a size of one day that
 * begins at every 00:00:00 of local time, you may use {@code of(Time.days(1), Time.hours(-8))}.
 * The offset parameter is {@code Time.hours(-8)} since UTC+08:00 is 8 hours ahead of
 * UTC time.
*
* @param size The size of the generated windows.
* @param slide The slide interval of the generated windows.
* @param offset The offset which window start would be shifted by.
* @return The time policy.
*/
public static SlidingEventTimeWindows of(Time size, Time slide, Time offset) {
return new SlidingEventTimeWindows(
size.toMilliseconds(), slide.toMilliseconds(), offset.toMilliseconds());
} | 3.68 |
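As a usage illustration of the assigner above, here is a sketch only: it assumes a `DataStream<Tuple2<String, Long>>` with event-time timestamps and watermarks already assigned, and uses the two-argument `of(size, slide)` overload for brevity.

```java
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class SlidingWindowExample {
    // Sums the second tuple field over one-hour windows that slide every 15 minutes.
    public static DataStream<Tuple2<String, Long>> hourlySums(DataStream<Tuple2<String, Long>> events) {
        return events
                .keyBy(e -> e.f0)
                .window(SlidingEventTimeWindows.of(Time.hours(1), Time.minutes(15)))
                .sum(1);
    }
}
```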
hmily_JavaBeanBinder_get | /**
* Get bean.
*
* @param <T> the type parameter
* @param bindable the bindable
* @param canCallGetValue the can call get value
* @return the bean
*/
@SuppressWarnings("unchecked")
static <T> Bean<T> get(final BindData<T> bindable, final boolean canCallGetValue) {
Class<?> type = bindable.getType().getTypeClass();
Supplier<T> value = bindable.getValue();
T instance = null;
if (canCallGetValue && value != null) {
instance = value.get();
type = instance != null ? instance.getClass() : type;
}
if (instance == null && !isInstantiable(type)) {
return null;
}
Bean<?> bean = Bean.cached;
if (bean == null || !type.equals(bean.getType())) {
bean = new Bean<>(type);
cached = bean;
}
return (Bean<T>) bean;
} | 3.68 |
hadoop_AbfsConfiguration_setBoolean | /**
* Sets boolean in the underlying Configuration object.
* Provided only as a convenience; does not add any account logic.
* @param key Configuration key
* @param value Configuration value
*/
public void setBoolean(String key, boolean value) {
rawConfig.setBoolean(key, value);
} | 3.68 |
hbase_DelayedUtil_getRemainingTime | /** Returns the time remaining before {@code timeout}, converted to {@code resultUnit}. */
public static long getRemainingTime(final TimeUnit resultUnit, final long timeout) {
final long currentTime = EnvironmentEdgeManager.currentTime();
if (currentTime >= timeout) {
return 0;
}
return resultUnit.convert(timeout - currentTime, TimeUnit.MILLISECONDS);
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_splitAnyRequests | /**
* It splits a list of non-localized resource requests among sub-clusters.
*/
private void splitAnyRequests(List<ResourceRequest> originalResourceRequests,
AllocationBookkeeper allocationBookkeeper) throws YarnException {
for (ResourceRequest resourceRequest : originalResourceRequests) {
// FIRST: pick the target set of subclusters (based on whether this RR
// is associated with other localized requests via an allocationId)
Long allocationId = resourceRequest.getAllocationRequestId();
Set<SubClusterId> targetSubclusters;
if (allocationBookkeeper.getSubClustersForId(allocationId) != null) {
targetSubclusters =
allocationBookkeeper.getSubClustersForId(allocationId);
} else {
targetSubclusters = allocationBookkeeper.getActiveAndEnabledSC();
}
// SECOND: pick how much to ask each RM for each request
splitIndividualAny(resourceRequest, targetSubclusters,
allocationBookkeeper);
}
} | 3.68 |
hibernate-validator_AnnotationProxy_hashCode | /**
* Calculates the hash code of this annotation proxy as described in
* {@link Annotation#hashCode()}.
*
* @return The hash code of this proxy.
*
* @see Annotation#hashCode()
*/
@Override
public int hashCode() {
return descriptor.hashCode();
} | 3.68 |
flink_NetworkBufferPool_recyclePooledMemorySegment | /**
* Corresponding to {@link #requestPooledMemorySegmentsBlocking} and {@link
* #requestPooledMemorySegment}, this method is for pooled memory segments recycling.
*/
public void recyclePooledMemorySegment(MemorySegment segment) {
// Adds the segment back to the queue, which does not immediately free the memory
// however, since this happens when references to the global pool are also released,
// making the availableMemorySegments queue and its contained object reclaimable
internalRecycleMemorySegments(Collections.singleton(checkNotNull(segment)));
} | 3.68 |
hmily_HmilyMySQLInsertStatement_getSetAssignment | /**
* Get set assignment segment.
*
* @return set assignment segment
*/
public Optional<HmilySetAssignmentSegment> getSetAssignment() {
return Optional.ofNullable(setAssignment);
} | 3.68 |
framework_VTabsheet_focusNextTab | /**
* Right arrow key focus move. Selection won't change until the
* selection key is pressed, but the target tab must be selectable. If
* no selectable tabs are found after currently focused tab, focus isn't
* moved.
*/
private void focusNextTab() {
int newTabIndex = focusedTabIndex;
// Find the next visible and enabled tab if any.
do {
newTabIndex++;
} while (newTabIndex < getTabCount() && !canSelectTab(newTabIndex));
if (newTabIndex < getTabCount()) {
keyFocusTab(newTabIndex);
}
} | 3.68 |
flink_BlobServer_close | /** Shuts down the BLOB server. */
@Override
public void close() throws IOException {
cleanupTimer.cancel();
if (shutdownRequested.compareAndSet(false, true)) {
Exception exception = null;
try {
this.serverSocket.close();
} catch (IOException ioe) {
exception = ioe;
}
// wake the thread up, in case it is waiting on some operation
interrupt();
try {
join();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOG.debug("Error while waiting for this thread to die.", ie);
}
synchronized (activeConnections) {
if (!activeConnections.isEmpty()) {
for (BlobServerConnection conn : activeConnections) {
LOG.debug("Shutting down connection {}.", conn.getName());
conn.close();
}
activeConnections.clear();
}
}
// Clean up the storage directory if it is owned
try {
storageDir
.owned()
.ifPresent(FunctionUtils.uncheckedConsumer(FileUtils::deleteDirectory));
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// Remove shutdown hook to prevent resource leaks
ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
if (LOG.isInfoEnabled()) {
LOG.info(
"Stopped BLOB server at {}:{}",
serverSocket.getInetAddress().getHostAddress(),
getPort());
}
ExceptionUtils.tryRethrowIOException(exception);
}
} | 3.68 |
morf_Cast_deepCopyInternal | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected AliasedField deepCopyInternal(DeepCopyTransformation transformer) {
return new Cast(getAlias(), transformer.deepCopy(expression), dataType, width, scale);
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateImports | /**
* generate imports
*/
private String generateImports() {
StringBuilder builder = new StringBuilder();
builder.append(String.format(CODE_IMPORTS, ScopeModel.class.getName()));
builder.append(String.format(CODE_IMPORTS, ScopeModelUtil.class.getName()));
return builder.toString();
} | 3.68 |
flink_TGetQueryIdResp_isSetQueryId | /** Returns true if field queryId is set (has been assigned a value) and false otherwise */
public boolean isSetQueryId() {
return this.queryId != null;
} | 3.68 |
querydsl_AntJPADomainExporter_execute | /**
* Exports the named persistence unit's metamodel to Querydsl query types. Expects to be
* called by Ant via name convention using a method with signature public void execute().
*/
public void execute() {
// We can assume we have the named persistence unit and its mapping file in our classpath,
// but we may have to allow some properties in that persistence unit to be overridden before
// we can successfully get that persistence unit's metamodel.
Map<String, String> properties = (configuration != null) ? configuration.getProperties() : null;
EntityManagerFactory emf = Persistence.createEntityManagerFactory(persistenceUnitName, properties);
// Now we can get the persistence unit's metamodel and export it to Querydsl query types.
Metamodel configuration = emf.getMetamodel();
JPADomainExporter exporter = new JPADomainExporter(namePrefix, nameSuffix, new File(targetFolder), configuration);
try {
exporter.execute();
generatedFiles = exporter.getGeneratedFiles();
} catch (IOException e) {
throw new RuntimeException("Error in JPADomainExporter", e);
}
} | 3.68 |
framework_ContainerOrderedWrapper_getContainerProperty | /*
* Gets the Property identified by the given itemId and propertyId from the
* Container Don't add a JavaDoc comment here, we use the default
* documentation from implemented interface.
*/
@Override
public Property getContainerProperty(Object itemId, Object propertyId) {
return container.getContainerProperty(itemId, propertyId);
} | 3.68 |
flink_LambdaUtil_applyToAllWhileSuppressingExceptions | /**
* This method supplies all elements from the input to the consumer. Exceptions that happen on
* elements are suppressed until all elements are processed. If exceptions happened for one or
* more of the inputs, they are reported in a combining suppressed exception.
*
* @param inputs iterator for all inputs to the throwingConsumer.
* @param throwingConsumer this consumer will be called for all elements delivered by the input
* iterator.
* @param <T> the type of input.
* @throws Exception collected exceptions that happened during the invocation of the consumer on
* the input elements.
*/
public static <T> void applyToAllWhileSuppressingExceptions(
Iterable<T> inputs, ThrowingConsumer<T, ? extends Exception> throwingConsumer)
throws Exception {
if (inputs != null && throwingConsumer != null) {
Exception exception = null;
for (T input : inputs) {
if (input != null) {
try {
throwingConsumer.accept(input);
} catch (Exception ex) {
exception = ExceptionUtils.firstOrSuppressed(ex, exception);
}
}
}
if (exception != null) {
throw exception;
}
}
} | 3.68 |
framework_Cell_getColumn | /**
* Returns the index of the column the cell resides in.
*
* @return the column index
*/
public int getColumn() {
return column;
} | 3.68 |
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageCPU | /**
* Percentage of CPU allocated to bundle's quota.
*/
public double getAllocatedLoadPercentageCPU() {
return this.allocatedLoadPercentageCPU;
} | 3.68 |
streampipes_OutputStrategies_userDefined | /**
* Creates a {@link org.apache.streampipes.model.output.UserDefinedOutputStrategy}. User-defined output strategies are
* fully flexible output strategies which are created by users at pipeline development time.
*
* @return UserDefinedOutputStrategy
*/
public static UserDefinedOutputStrategy userDefined() {
return new UserDefinedOutputStrategy();
} | 3.68 |
dubbo_Predicates_or | /**
* a composed predicate that represents a short-circuiting logical OR of {@link Predicate predicates}
*
* @param predicates {@link Predicate predicates}
* @param <T> the detected type
* @return non-null
*/
static <T> Predicate<T> or(Predicate<T>... predicates) {
return of(predicates).reduce(Predicate::or).orElse(e -> true);
} | 3.68 |
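To make the semantics of the reduction above concrete, here is a plain-JDK sketch of the same short-circuiting OR composition; it is a standalone illustration, not the Dubbo class itself.

```java
import java.util.function.Predicate;
import java.util.stream.Stream;

public class OrPredicateDemo {
    @SafeVarargs
    static <T> Predicate<T> or(Predicate<T>... predicates) {
        // Same shape as the helper above: OR of all predicates,
        // defaulting to "accept everything" when none are given.
        return Stream.of(predicates).reduce(Predicate::or).orElse(e -> true);
    }

    public static void main(String[] args) {
        Predicate<String> startsWithA = s -> s.startsWith("a");
        Predicate<String> endsWithZ = s -> s.endsWith("z");
        System.out.println(or(startsWithA, endsWithZ).test("abc"));  // true
        System.out.println(or(startsWithA, endsWithZ).test("xyz"));  // true
        System.out.println(or(startsWithA, endsWithZ).test("nope")); // false
        System.out.println(or().test("anything"));                   // true: empty OR accepts all
    }
}
```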
hbase_RestoreTool_getTableSnapshotPath | /**
 * Returns the path to the backup table snapshot directory:
* "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot"
* @param backupRootPath backup root path
* @param tableName table name
* @param backupId backup Id
* @return path for snapshot
*/
Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) {
return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
HConstants.SNAPSHOT_DIR_NAME);
} | 3.68 |
dubbo_RpcStatus_getStatus | /**
* @param url
* @param methodName
* @return status
*/
public static RpcStatus getStatus(URL url, String methodName) {
String uri = url.toIdentityString();
ConcurrentMap<String, RpcStatus> map =
ConcurrentHashMapUtils.computeIfAbsent(METHOD_STATISTICS, uri, k -> new ConcurrentHashMap<>());
return ConcurrentHashMapUtils.computeIfAbsent(map, methodName, k -> new RpcStatus());
} | 3.68 |
hbase_ZKWatcher_setZnodeAclsRecursive | /**
* Set the znode perms recursively. This will do post-order recursion, so that baseZnode ACLs will
* be set last in case the master fails in between.
* @param znode the ZNode to set the permissions for
*/
private void setZnodeAclsRecursive(String znode) throws KeeperException, InterruptedException {
List<String> children = recoverableZooKeeper.getChildren(znode, false);
for (String child : children) {
setZnodeAclsRecursive(ZNodePaths.joinZNode(znode, child));
}
List<ACL> acls = createACL(znode, true);
LOG.info("Setting ACLs for znode:{} , acl:{}", znode, acls);
recoverableZooKeeper.setAcl(znode, acls, -1);
} | 3.68 |
hbase_MasterObserver_preSplitRegion | /**
* Called before the split region procedure is called.
* @param c the environment to interact with the framework and master
* @param tableName the table where the region belongs to
* @param splitRow split point
*/
default void preSplitRegion(final ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] splitRow) throws IOException {
} | 3.68 |
flink_TSetClientInfoResp_isSet | /**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case STATUS:
return isSetStatus();
}
throw new java.lang.IllegalStateException();
} | 3.68 |
hbase_FileArchiverNotifierFactoryImpl_get | /**
* Returns the {@link FileArchiverNotifier} instance for the given {@link TableName}.
* @param tn The table to obtain a notifier for
* @return The notifier for the given {@code tablename}.
*/
public FileArchiverNotifier get(Connection conn, Configuration conf, FileSystem fs,
TableName tn) {
// Ensure that only one instance is exposed to callers
return CACHE.computeIfAbsent(tn, key -> new FileArchiverNotifierImpl(conn, conf, fs, key));
} | 3.68 |
flink_ResultPartition_canBeCompressed | /**
* Whether the buffer can be compressed or not. Note that event is not compressed because it is
* usually small and the size can become even larger after compression.
*/
protected boolean canBeCompressed(Buffer buffer) {
return bufferCompressor != null && buffer.isBuffer() && buffer.readableBytes() > 0;
} | 3.68 |
hbase_RawLong_decodeLong | /**
* Read a {@code long} value from the buffer {@code buff}.
*/
public long decodeLong(byte[] buff, int offset) {
return Bytes.toLong(buff, offset);
} | 3.68 |
pulsar_NamespaceIsolationPolicies_isSharedBroker | /**
* Check to see whether a broker is in the shared broker pool or not.
*
* @param host
* @return
*/
public boolean isSharedBroker(String host) {
for (NamespaceIsolationData policyData : this.policies.values()) {
NamespaceIsolationPolicyImpl policy = new NamespaceIsolationPolicyImpl(policyData);
if (policy.isPrimaryBroker(host)) {
// not free for sharing, this is some properties' primary broker
return false;
}
}
return true;
} | 3.68 |
flink_DoubleHashSet_contains | /** See {@link Double#equals(Object)}. */
public boolean contains(final double k) {
long longKey = Double.doubleToLongBits(k);
if (longKey == 0L) {
return this.containsZero;
} else {
double[] key = this.key;
long curr;
int pos;
if ((curr =
Double.doubleToLongBits(
key[pos = (int) MurmurHashUtil.fmix(longKey) & this.mask]))
== 0L) {
return false;
} else if (longKey == curr) {
return true;
} else {
while ((curr = Double.doubleToLongBits(key[pos = pos + 1 & this.mask])) != 0L) {
if (longKey == curr) {
return true;
}
}
return false;
}
}
} | 3.68 |
flink_StateTable_remove | /**
* Removes the mapping for the composite of active key and given namespace. This method should
* be preferred over {@link #removeAndGetOld(N)} when the caller is not interested in the old
* state.
*
* @param namespace the namespace of the mapping to remove. Not null.
*/
public void remove(N namespace) {
remove(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace);
} | 3.68 |
zxing_UPCEANReader_checkChecksum | /**
* @param s string of digits to check
* @return {@link #checkStandardUPCEANChecksum(CharSequence)}
* @throws FormatException if the string does not contain only digits
*/
boolean checkChecksum(String s) throws FormatException {
return checkStandardUPCEANChecksum(s);
} | 3.68 |
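For reference, the standard UPC/EAN checksum the snippet delegates to works as follows: counting from the rightmost digit (the check digit) as position 1, odd positions carry weight 1 and even positions weight 3, and the weighted sum must be divisible by 10. Below is a standalone sketch of that rule, not ZXing's internal implementation.

```java
public class UpcEanChecksumDemo {
    // Validates a digit string whose last character is the check digit.
    static boolean hasValidChecksum(String s) {
        int sum = 0;
        for (int i = 0; i < s.length(); i++) {
            int digit = s.charAt(s.length() - 1 - i) - '0';
            if (digit < 0 || digit > 9) {
                throw new IllegalArgumentException("not a digit string: " + s);
            }
            // position i + 1 from the right: weight 1 when odd, 3 when even
            sum += (i % 2 == 0) ? digit : 3 * digit;
        }
        return sum % 10 == 0;
    }

    public static void main(String[] args) {
        System.out.println(hasValidChecksum("4006381333931")); // true  (valid EAN-13)
        System.out.println(hasValidChecksum("4006381333932")); // false (corrupted check digit)
    }
}
```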
hbase_RegionCoprocessorHost_prePrepareBulkLoad | /////////////////////////////////////////////////////////////////////////////////////////////////
// BulkLoadObserver hooks
/////////////////////////////////////////////////////////////////////////////////////////////////
public void prePrepareBulkLoad(User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new BulkLoadObserverOperation(user) {
@Override
protected void call(BulkLoadObserver observer) throws IOException {
observer.prePrepareBulkLoad(this);
}
});
} | 3.68 |
hudi_StreamWriteFunction_getDataBuffer | // -------------------------------------------------------------------------
// Getter/Setter
// -------------------------------------------------------------------------
@VisibleForTesting
@SuppressWarnings("rawtypes")
public Map<String, List<HoodieRecord>> getDataBuffer() {
Map<String, List<HoodieRecord>> ret = new HashMap<>();
for (Map.Entry<String, DataBucket> entry : buckets.entrySet()) {
ret.put(entry.getKey(), entry.getValue().writeBuffer());
}
return ret;
} | 3.68 |
flink_NormalizedKeySorter_writeToOutput | /**
* Writes a subset of the records in this buffer in their logical order to the given output.
*
* @param output The output view to write the records to.
* @param start The logical start position of the subset.
* @param num The number of elements to write.
* @throws IOException Thrown, if an I/O exception occurred writing to the output view.
*/
@Override
public void writeToOutput(final ChannelWriterOutputView output, final int start, int num)
throws IOException {
int currentMemSeg = start / this.indexEntriesPerSegment;
int offset = (start % this.indexEntriesPerSegment) * this.indexEntrySize;
while (num > 0) {
final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++);
// check whether we have a full or partially full segment
if (num >= this.indexEntriesPerSegment && offset == 0) {
// full segment
for (; offset <= this.lastIndexEntryOffset; offset += this.indexEntrySize) {
final long pointer = currentIndexSegment.getLong(offset) & POINTER_MASK;
this.recordBuffer.setReadPosition(pointer);
this.serializer.copy(this.recordBuffer, output);
}
num -= this.indexEntriesPerSegment;
} else {
// partially filled segment
for (;
num > 0 && offset <= this.lastIndexEntryOffset;
num--, offset += this.indexEntrySize) {
final long pointer = currentIndexSegment.getLong(offset) & POINTER_MASK;
this.recordBuffer.setReadPosition(pointer);
this.serializer.copy(this.recordBuffer, output);
}
}
offset = 0;
}
} | 3.68 |
querydsl_BeanMap_keySet | /**
* Get the keys for this BeanMap.
* <p>
* Write-only properties are <b>not</b> included in the returned set of
* property names, although it is possible to set their value and to get
* their type.
*
* @return BeanMap keys. The Set returned by this method is not
* modifiable.
*/
@Override
public Set<String> keySet() {
return readMethods.keySet();
} | 3.68 |
framework_Flash_setStandby | /**
* Sets standby.
*
* @param standby
* Standby string.
*/
public void setStandby(String standby) {
if (standby != getState().standby
|| (standby != null && !standby.equals(getState().standby))) {
getState().standby = standby;
requestRepaint();
}
} | 3.68 |
hbase_HFileCorruptionChecker_getCorrupted | /** Returns the set of corrupted file paths after checkTables is called. */
public Collection<Path> getCorrupted() {
return new HashSet<>(corrupted);
} | 3.68 |
hbase_WALPrettyPrinter_enableValues | /**
* turns value output on
*/
public void enableValues() {
outputValues = true;
} | 3.68 |
morf_MySqlDialect_makeStringLiteral | /**
* Backslashes in MySQL denote escape sequences and have to themselves be escaped.
*
* @see <a href="http://dev.mysql.com/doc/refman/5.0/en/string-literals.html">String Literals</a>
* @see org.alfasoftware.morf.jdbc.SqlDialect#makeStringLiteral(java.lang.String)
*/
@Override
protected String makeStringLiteral(String literalValue) {
if (StringUtils.isEmpty(literalValue)) {
return "NULL";
}
return String.format("'%s'", StringUtils.replace(super.escapeSql(literalValue), "\\", "\\\\"));
} | 3.68 |
hbase_HMobStore_getPath | /**
* Gets the mob file path.
* @return The mob file path.
*/
public Path getPath() {
return mobFamilyPath;
} | 3.68 |
flink_CatalogManager_createTable | /**
* Creates a table in a given fully qualified path.
*
* @param table The table to put in the given path.
* @param objectIdentifier The fully qualified path where to put the table.
* @param ignoreIfExists If false exception will be thrown if a table exists in the given path.
*/
public void createTable(
CatalogBaseTable table, ObjectIdentifier objectIdentifier, boolean ignoreIfExists) {
execute(
(catalog, path) -> {
ResolvedCatalogBaseTable<?> resolvedTable = resolveCatalogBaseTable(table);
ResolvedCatalogBaseTable<?> resolvedListenedTable =
managedTableListener.notifyTableCreation(
catalog,
objectIdentifier,
resolvedTable,
false,
ignoreIfExists);
catalog.createTable(path, resolvedListenedTable, ignoreIfExists);
if (resolvedListenedTable instanceof CatalogTable) {
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
CreateTableEvent.createEvent(
CatalogContext.createContext(
objectIdentifier.getCatalogName(),
catalog),
objectIdentifier,
resolvedListenedTable,
ignoreIfExists,
false)));
}
},
objectIdentifier,
false,
"CreateTable");
} | 3.68 |
hadoop_Anonymizer_anonymizeTrace | // anonymize the job trace file
private void anonymizeTrace() throws Exception {
if (anonymizeTrace) {
System.out.println("Anonymizing trace file: " + inputTracePath);
JobTraceReader reader = null;
JsonGenerator outGen = null;
Configuration conf = getConf();
try {
// create a generator
outGen = createJsonGenerator(conf, outputTracePath);
// define the input trace reader
reader = new JobTraceReader(inputTracePath, conf);
// read the plain unanonymized logged job
LoggedJob job = reader.getNext();
while (job != null) {
// write it via an anonymizing channel
outGen.writeObject(job);
// read the next job
job = reader.getNext();
}
System.out.println("Anonymized trace file: " + outputTracePath);
} finally {
if (outGen != null) {
outGen.close();
}
if (reader != null) {
reader.close();
}
}
}
} | 3.68 |
hmily_JavaBeanBinder_isSettable | /**
* Is settable boolean.
*
* @return the boolean
*/
boolean isSettable() {
return this.setter != null;
} | 3.68 |
hadoop_MultipleOutputFormat_generateFileNameForKeyValue | /**
 * Generate the output file name based on the given key and the leaf file
* name. The default behavior is that the file name does not depend on the
* key.
*
* @param key
* the key of the output data
* @param name
* the leaf file name
* @return generated file name
*/
protected String generateFileNameForKeyValue(K key, V value, String name) {
return name;
} | 3.68 |
hadoop_AbstractRouterPolicy_getHomeSubcluster | /**
 * Simply picks from alphabetically-sorted active subclusters based on the
 * hash of the queue name. Jobs of the same queue will all be routed to the same
* sub-cluster, as far as the number of active sub-cluster and their names
* remain the same.
*
* @param appContext the {@link ApplicationSubmissionContext} that
* has to be routed to an appropriate subCluster for execution.
*
* @param blackLists the list of subClusters as identified by
* {@link SubClusterId} to blackList from the selection of the home
* subCluster.
*
* @return a hash-based chosen {@link SubClusterId} that will be the "home"
* for this application.
*
* @throws YarnException if there are no active subclusters.
*/
@Override
public SubClusterId getHomeSubcluster(ApplicationSubmissionContext appContext,
List<SubClusterId> blackLists) throws YarnException {
// null checks and default-queue behavior
validate(appContext);
// apply filtering based on reservation location and active sub-clusters
Map<SubClusterId, SubClusterInfo> filteredSubClusters = prefilterSubClusters(
appContext.getReservationID(), getActiveSubclusters());
FederationPolicyUtils.validateSubClusterAvailability(filteredSubClusters.keySet(), blackLists);
// remove black SubCluster
if (blackLists != null) {
blackLists.forEach(filteredSubClusters::remove);
}
// pick the chosen subCluster from the active ones
return chooseSubCluster(appContext.getQueue(), filteredSubClusters);
} | 3.68 |
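A hypothetical, dependency-free sketch of the "hash of the queue name over the alphabetically-sorted active sub-clusters" idea described above; the names and types are simplified stand-ins, not the YARN federation API.

```java
import java.util.List;
import java.util.stream.Collectors;

public class HashRoutingSketch {
    static String chooseSubCluster(String queue, List<String> activeSubClusters) {
        List<String> sorted = activeSubClusters.stream().sorted().collect(Collectors.toList());
        // Non-negative hash of the queue name, mapped onto the sorted list.
        int index = (queue.hashCode() & Integer.MAX_VALUE) % sorted.size();
        return sorted.get(index);
    }

    public static void main(String[] args) {
        // The same queue always maps to the same sub-cluster while membership is stable.
        System.out.println(chooseSubCluster("etl", List.of("sc-b", "sc-a", "sc-c")));
        System.out.println(chooseSubCluster("etl", List.of("sc-a", "sc-b", "sc-c")));
    }
}
```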
hudi_HoodieFileGroup_addBaseFile | /**
* Add a new datafile into the file group.
*/
public void addBaseFile(HoodieBaseFile dataFile) {
if (!fileSlices.containsKey(dataFile.getCommitTime())) {
fileSlices.put(dataFile.getCommitTime(), new FileSlice(fileGroupId, dataFile.getCommitTime()));
}
fileSlices.get(dataFile.getCommitTime()).setBaseFile(dataFile);
} | 3.68 |
hadoop_OBSBlockOutputStream_abort | /**
* Abort a multi-part upload. Retries are attempted on failures.
* IOExceptions are caught; this is expected to be run as a cleanup
* process.
*/
void abort() {
String operation =
String.format(
"Aborting multi-part upload for '%s', id '%s",
writeOperationHelper, uploadId);
try {
LOG.debug(operation);
writeOperationHelper.abortMultipartUpload(key, uploadId);
} catch (ObsException e) {
LOG.warn(
"Unable to abort multipart upload, you may need to purge "
+ "uploaded parts",
e);
}
} | 3.68 |
hadoop_TimelineEntityType_isParent | /**
* Whether the input type can be a parent of this entity.
*
* @param type entity type.
* @return true, if this entity type is parent of passed entity type, false
* otherwise.
*/
public boolean isParent(TimelineEntityType type) {
switch (this) {
case YARN_CLUSTER:
return false;
case YARN_FLOW_RUN:
return YARN_FLOW_RUN == type || YARN_CLUSTER == type;
case YARN_APPLICATION:
return YARN_FLOW_RUN == type || YARN_CLUSTER == type;
case YARN_APPLICATION_ATTEMPT:
return YARN_APPLICATION == type;
case YARN_CONTAINER:
return YARN_APPLICATION_ATTEMPT == type;
case YARN_QUEUE:
return YARN_QUEUE == type;
default:
return false;
}
} | 3.68 |
flink_BigDecComparator_putNormalizedKey | /**
* Adds a normalized key containing a normalized order of magnitude of the given record. 2 bits
* determine the sign (negative, zero, positive), 33 bits determine the magnitude. This method
* adds at most 5 bytes that contain information.
*/
@Override
public void putNormalizedKey(BigDecimal record, MemorySegment target, int offset, int len) {
final long signum = record.signum();
// order of magnitude
// smallest:
// scale = Integer.MAX, precision = 1 => SMALLEST_MAGNITUDE
// largest:
// scale = Integer.MIN, precision = Integer.MAX => LARGEST_MAGNITUDE
final long mag = ((long) record.scale()) - ((long) record.precision()) + 1;
// normalize value range: from 0 to (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE)
final long normMag = -1L * LARGEST_MAGNITUDE + mag;
// normalize value range dependent on sign:
// 0 to (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE)
// OR (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE) to 0
// --> uses at most 33 bit (5 least-significant bytes)
long signNormMag =
signum < 0 ? normMag : (SMALLEST_MAGNITUDE + -1L * LARGEST_MAGNITUDE - normMag);
// zero has no magnitude
// set 34th bit to flag zero
if (signum == 0) {
signNormMag = 0L;
signNormMag |= (1L << 34);
}
// set 35th bit to flag positive sign
else if (signum > 0) {
signNormMag |= (1L << 35);
}
// add 5 least-significant bytes that contain value to target
for (int i = 0; i < 5 && len > 0; i++, len--) {
final byte b = (byte) (signNormMag >>> (8 * (4 - i)));
target.put(offset++, b);
}
} | 3.68 |
framework_DragSourceExtensionConnector_createDataTransferData | /**
* Creates the data map to be set as the {@code DataTransfer} object's data.
*
* @param dragStartEvent
* The drag start event
* @return The map from type to data, or {@code null} for not setting any
* data. Returning {@code null} will cancel the drag start.
*/
protected Map<String, String> createDataTransferData(
NativeEvent dragStartEvent) {
Map<String, String> orderedData = new LinkedHashMap<>();
for (String type : getState().types) {
orderedData.put(type, getState().data.get(type));
}
// Add payload for comparing against acceptance criteria
getState().payload.values().forEach(payload -> orderedData
.put(payload.getPayloadString(), payload.getValue()));
return orderedData;
} | 3.68 |
flink_KeyedStream_validateKeyTypeIsHashable | /**
* Validates that a given type of element (as encoded by the provided {@link TypeInformation})
* can be used as a key in the {@code DataStream.keyBy()} operation.
*
* @param type The {@link TypeInformation} of the type to check.
* @return {@code false} if:
* <ol>
* <li>it is a POJO type but does not override the {@link #hashCode()} method and relies
* on the {@link Object#hashCode()} implementation.
* <li>it is an array of any type (see {@link PrimitiveArrayTypeInfo}, {@link
* BasicArrayTypeInfo}, {@link ObjectArrayTypeInfo}).
* <li>it is enum type
* </ol>
* , {@code true} otherwise.
*/
private boolean validateKeyTypeIsHashable(TypeInformation<?> type) {
try {
return (type instanceof PojoTypeInfo)
? !type.getTypeClass()
.getMethod("hashCode")
.getDeclaringClass()
.equals(Object.class)
: !(isArrayType(type) || isEnumType(type));
} catch (NoSuchMethodException ignored) {
// this should never happen as we are just searching for the hashCode() method.
}
return false;
} | 3.68 |
hadoop_DockerCommandExecutor_isStartable | /**
* Is the container in a startable state?
*
* @param containerStatus the container's {@link DockerContainerStatus}.
* @return is the container in a startable state.
*/
public static boolean isStartable(DockerContainerStatus containerStatus) {
if (containerStatus.equals(DockerContainerStatus.EXITED)
|| containerStatus.equals(DockerContainerStatus.STOPPED)) {
return true;
}
return false;
} | 3.68 |
hadoop_WebAppProxyServer_doSecureLogin | /**
* Log in as the Kerberos principal designated for the proxy
* @param conf the configuration holding this information in it.
* @throws IOException on any error.
*/
protected void doSecureLogin(Configuration conf) throws IOException {
InetSocketAddress socAddr = getBindAddress(conf);
SecurityUtil.login(conf, YarnConfiguration.PROXY_KEYTAB,
YarnConfiguration.PROXY_PRINCIPAL, socAddr.getHostName());
} | 3.68 |
flink_SplitDataProperties_splitsPartitionedBy | /**
* Defines that data is partitioned using an identifiable method across input splits on the
* fields defined by field expressions. Multiple field expressions must be separated by the
* semicolon ';' character. All records sharing the same key (combination) must be contained in
* a single input split.
*
* <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong
* results! </b>
*
* @param partitionMethodId An ID for the method that was used to partition the data across
* splits.
* @param partitionFields The field expressions of the partitioning keys.
* @return This SplitDataProperties object.
*/
public SplitDataProperties<T> splitsPartitionedBy(
String partitionMethodId, String partitionFields) {
if (partitionFields == null) {
throw new InvalidProgramException("PartitionFields may not be null.");
}
String[] partitionKeysA = partitionFields.split(";");
if (partitionKeysA.length == 0) {
throw new InvalidProgramException("PartitionFields may not be empty.");
}
this.splitPartitionKeys = getAllFlatKeys(partitionKeysA);
if (partitionMethodId != null) {
this.splitPartitioner = new SourcePartitionerMarker<>(partitionMethodId);
} else {
this.splitPartitioner = null;
}
return this;
} | 3.68 |
flink_SerdeUtils_serializeSplitAssignments | /**
* Serialize a mapping from subtask ids to lists of assigned splits. The serialized format is
* following:
*
* <pre>
* 4 bytes - number of subtasks
* 4 bytes - split serializer version
* N bytes - [assignment_for_subtask]
* 4 bytes - subtask id
* 4 bytes - number of assigned splits
* N bytes - [assigned_splits]
* 4 bytes - serialized split length
* N bytes - serialized splits
* </pre>
*
* @param splitAssignments a mapping from subtask ids to lists of assigned splits.
* @param splitSerializer the serializer of the split.
* @param <SplitT> the type of the splits.
* @param <C> the type of the collection to hold the assigned splits for a subtask.
* @return the serialized bytes of the given subtask to splits assignment mapping.
* @throws IOException when serialization failed.
*/
public static <SplitT extends SourceSplit, C extends Collection<SplitT>>
byte[] serializeSplitAssignments(
Map<Integer, C> splitAssignments,
SimpleVersionedSerializer<SplitT> splitSerializer)
throws IOException {
try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos)) {
out.writeInt(splitAssignments.size());
// Split serializer version.
out.writeInt(splitSerializer.getVersion());
// Write assignments for subtasks.
for (Map.Entry<Integer, C> entry : splitAssignments.entrySet()) {
// Subtask ID
int subtaskId = entry.getKey();
Collection<SplitT> splitsForSubtask = entry.getValue();
// Number of the splits.
out.writeInt(subtaskId);
out.writeInt(splitsForSubtask.size());
for (SplitT split : splitsForSubtask) {
byte[] serializedSplit = splitSerializer.serialize(split);
out.writeInt(serializedSplit.length);
out.write(serializedSplit);
}
}
return baos.toByteArray();
}
} | 3.68 |
flink_StringUtils_isNullOrWhitespaceOnly | /**
* Checks if the string is null, empty, or contains only whitespace characters. A whitespace
* character is defined via {@link Character#isWhitespace(char)}.
*
* @param str The string to check
* @return True, if the string is null or blank, false otherwise.
*/
public static boolean isNullOrWhitespaceOnly(String str) {
if (str == null || str.length() == 0) {
return true;
}
final int len = str.length();
for (int i = 0; i < len; i++) {
if (!Character.isWhitespace(str.charAt(i))) {
return false;
}
}
return true;
} | 3.68 |