name | code_snippet | score
---|---|---|
dubbo_SlidingWindow_calculatePaneIdx | /**
* Calculate the pane index corresponding to the specified timestamp.
*
* @param timeMillis the specified timestamp.
* @return the pane index corresponding to the specified timestamp.
*/
private int calculatePaneIdx(long timeMillis) {
return (int) ((timeMillis / paneIntervalInMs) % paneCount);
} | 3.68 |
graphhopper_LandmarkStorage_getSubnetworksWithLandmarks | /**
* @return the number of subnetworks that have landmarks
*/
public int getSubnetworksWithLandmarks() {
return landmarkIDs.size();
} | 3.68 |
hadoop_UnmanagedApplicationManager_monitorCurrentAppAttempt | /**
 * Monitor the submitted application and its current attempt until they reach
 * the given states.
*
* @param appId Application Id of application to be monitored
 * @param appStates acceptable application states
* @param attemptState acceptable application attempt state
* @return the application report
* @throws YarnException if getApplicationReport fails
* @throws IOException if getApplicationReport fails
*/
private ApplicationAttemptReport monitorCurrentAppAttempt(ApplicationId appId,
Set<YarnApplicationState> appStates, YarnApplicationAttemptState attemptState)
throws YarnException, IOException {
long startTime = System.currentTimeMillis();
ApplicationAttemptId appAttemptId = null;
while (true) {
if (appAttemptId == null) {
// Get application report for the appId we are interested in
ApplicationReport report = getApplicationReport(appId);
YarnApplicationState state = report.getYarnApplicationState();
if (appStates.contains(state)) {
if (state != YarnApplicationState.ACCEPTED) {
throw new YarnRuntimeException(
"Received non-accepted application state: " + state + " for "
+ appId + ". This is likely because this is not the first "
+ "app attempt in home sub-cluster, and AMRMProxy HA "
+ "(yarn.nodemanager.amrmproxy.ha.enable) is not enabled.");
}
appAttemptId =
getApplicationReport(appId).getCurrentApplicationAttemptId();
} else {
LOG.info("Current application state of {} is {}, will retry later.",
appId, state);
}
}
if (appAttemptId != null) {
GetApplicationAttemptReportRequest req =
this.recordFactory.newRecordInstance(GetApplicationAttemptReportRequest.class);
req.setApplicationAttemptId(appAttemptId);
GetApplicationAttemptReportResponse appAttemptReport =
this.rmClient.getApplicationAttemptReport(req);
ApplicationAttemptReport attemptReport = appAttemptReport.getApplicationAttemptReport();
YarnApplicationAttemptState appAttemptState =
attemptReport.getYarnApplicationAttemptState();
if (attemptState.equals(appAttemptState)) {
return attemptReport;
}
LOG.info("Current attempt state of {} is {}, waiting for current attempt to reach {}.",
appAttemptId, appAttemptState, attemptState);
}
try {
Thread.sleep(this.asyncApiPollIntervalMillis);
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting for current attempt of {} to reach {}.",
appId, attemptState);
}
if (System.currentTimeMillis() - startTime > AM_STATE_WAIT_TIMEOUT_MS) {
throw new RuntimeException("Timeout for waiting current attempt of "
+ appId + " to reach " + attemptState);
}
}
} | 3.68 |
flink_MiniCluster_startTaskManager | /**
 * Starts an additional TaskManager process.
*
* <p>When the MiniCluster starts up, it always starts {@link
 * MiniClusterConfiguration#getNumTaskManagers} TaskManagers. All TaskManagers are indexed from
 * 0 to the number of TaskManagers started so far, minus one. This method starts a TaskManager
 * with the next index, which is the number of TaskManagers started so far. The index always
 * increases with each newly started TaskManager. The indices of terminated TaskManagers are not
 * reused after {@link #terminateTaskManager(int)}.
*/
public void startTaskManager() throws Exception {
synchronized (lock) {
final Configuration configuration = miniClusterConfiguration.getConfiguration();
final TaskExecutor taskExecutor =
TaskManagerRunner.startTaskManager(
configuration,
new ResourceID(UUID.randomUUID().toString()),
taskManagerRpcServiceFactory.createRpcService(),
haServices,
heartbeatServices,
metricRegistry,
blobCacheService,
useLocalCommunication(),
ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES,
workingDirectory.createSubWorkingDirectory("tm_" + taskManagers.size()),
taskManagerTerminatingFatalErrorHandlerFactory.create(
taskManagers.size()),
delegationTokenReceiverRepository);
taskExecutor.start();
taskManagers.add(taskExecutor);
}
} | 3.68 |
morf_TableReference_getDblink | /**
* Get the dblink for the table
*
* @return the dblink
*/
public String getDblink() {
return dblink;
} | 3.68 |
flink_SpillingBuffer_close | /**
* @return A list with all memory segments that have been taken from the memory segment source.
*/
public List<MemorySegment> close() throws IOException {
final ArrayList<MemorySegment> segments =
new ArrayList<MemorySegment>(
this.fullSegments.size() + this.numMemorySegmentsInWriter);
// if the buffer is still being written, clean that up
if (getCurrentSegment() != null) {
segments.add(getCurrentSegment());
clear();
}
moveAll(this.fullSegments, segments);
this.fullSegments.clear();
// clean up the writer
if (this.writer != null) {
// closing before the first flip, collect the memory in the writer
this.writer.close();
for (int i = this.numMemorySegmentsInWriter; i > 0; i--) {
segments.add(this.writer.getNextReturnedBlock());
}
this.writer.closeAndDelete();
this.writer = null;
}
// clean up the views
if (this.inMemInView != null) {
this.inMemInView = null;
}
if (this.externalInView != null) {
if (!this.externalInView.isClosed()) {
this.externalInView.close();
}
this.externalInView = null;
}
return segments;
} | 3.68 |
hudi_AvroSchemaConverter_convertToDataType | /**
* Converts an Avro schema {@code schema} into a nested row structure with deterministic field order and
* data types that are compatible with Flink's Table & SQL API.
*
* @param schema Avro schema definition
* @return data type matching the schema
*/
public static DataType convertToDataType(Schema schema) {
switch (schema.getType()) {
case RECORD:
final List<Schema.Field> schemaFields = schema.getFields();
final DataTypes.Field[] fields = new DataTypes.Field[schemaFields.size()];
for (int i = 0; i < schemaFields.size(); i++) {
final Schema.Field field = schemaFields.get(i);
fields[i] = DataTypes.FIELD(field.name(), convertToDataType(field.schema()));
}
return DataTypes.ROW(fields).notNull();
case ENUM:
case STRING:
// convert Avro's Utf8/CharSequence to String
return DataTypes.STRING().notNull();
case ARRAY:
return DataTypes.ARRAY(convertToDataType(schema.getElementType())).notNull();
case MAP:
return DataTypes.MAP(
DataTypes.STRING().notNull(),
convertToDataType(schema.getValueType()))
.notNull();
case UNION:
final Schema actualSchema;
final boolean nullable;
if (schema.getTypes().size() == 2
&& schema.getTypes().get(0).getType() == Schema.Type.NULL) {
actualSchema = schema.getTypes().get(1);
nullable = true;
} else if (schema.getTypes().size() == 2
&& schema.getTypes().get(1).getType() == Schema.Type.NULL) {
actualSchema = schema.getTypes().get(0);
nullable = true;
} else if (schema.getTypes().size() == 1) {
actualSchema = schema.getTypes().get(0);
nullable = false;
} else {
List<Schema> nonNullTypes = schema.getTypes().stream()
.filter(s -> s.getType() != Schema.Type.NULL)
.collect(Collectors.toList());
nullable = schema.getTypes().size() > nonNullTypes.size();
// use Kryo for serialization
DataType rawDataType = new AtomicDataType(
new TypeInformationRawType<>(false, Types.GENERIC(Object.class)))
.notNull();
if (recordTypesOfSameNumFields(nonNullTypes)) {
DataType converted = DataTypes.ROW(
DataTypes.FIELD("wrapper", rawDataType))
.notNull();
return nullable ? converted.nullable() : converted;
}
// use Kryo for serialization
return nullable ? rawDataType.nullable() : rawDataType;
}
DataType converted = convertToDataType(actualSchema);
return nullable ? converted.nullable() : converted;
case FIXED:
// logical decimal type
if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
final LogicalTypes.Decimal decimalType =
(LogicalTypes.Decimal) schema.getLogicalType();
return DataTypes.DECIMAL(decimalType.getPrecision(), decimalType.getScale())
.notNull();
}
// convert fixed size binary data to primitive byte arrays
return DataTypes.VARBINARY(schema.getFixedSize()).notNull();
case BYTES:
// logical decimal type
if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
final LogicalTypes.Decimal decimalType =
(LogicalTypes.Decimal) schema.getLogicalType();
return DataTypes.DECIMAL(decimalType.getPrecision(), decimalType.getScale())
.notNull();
}
return DataTypes.BYTES().notNull();
case INT:
// logical date and time type
final org.apache.avro.LogicalType logicalType = schema.getLogicalType();
if (logicalType == LogicalTypes.date()) {
return DataTypes.DATE().notNull();
} else if (logicalType == LogicalTypes.timeMillis()) {
return DataTypes.TIME(3).notNull();
}
return DataTypes.INT().notNull();
case LONG:
// logical timestamp type
if (schema.getLogicalType() == LogicalTypes.timestampMillis()) {
return DataTypes.TIMESTAMP(3).notNull();
} else if (schema.getLogicalType() == LogicalTypes.localTimestampMillis()) {
return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).notNull();
} else if (schema.getLogicalType() == LogicalTypes.timestampMicros()) {
return DataTypes.TIMESTAMP(6).notNull();
} else if (schema.getLogicalType() == LogicalTypes.localTimestampMicros()) {
return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(6).notNull();
} else if (schema.getLogicalType() == LogicalTypes.timeMillis()) {
return DataTypes.TIME(3).notNull();
} else if (schema.getLogicalType() == LogicalTypes.timeMicros()) {
return DataTypes.TIME(6).notNull();
}
return DataTypes.BIGINT().notNull();
case FLOAT:
return DataTypes.FLOAT().notNull();
case DOUBLE:
return DataTypes.DOUBLE().notNull();
case BOOLEAN:
return DataTypes.BOOLEAN().notNull();
case NULL:
return DataTypes.NULL();
default:
throw new IllegalArgumentException("Unsupported Avro type '" + schema.getType() + "'.");
}
} | 3.68 |
flink_DataSourceTask_getLogString | /**
* Utility function that composes a string for logging purposes. The string includes the given
* message and the index of the task in its task group together with the number of tasks in the
* task group.
*
* @param message The main message for the log.
* @param taskName The name of the task.
* @return The string ready for logging.
*/
private String getLogString(String message, String taskName) {
return BatchTask.constructLogString(message, taskName, this);
} | 3.68 |
flink_FileSourceSplit_updateWithCheckpointedPosition | /**
* Creates a copy of this split where the checkpointed position is replaced by the given new
* position.
*
* <p><b>IMPORTANT:</b> Subclasses that add additional information to the split must override
* this method to return that subclass type. This contract is enforced by checks in the file
* source implementation. We did not try to enforce this contract via generics in this split
* class, because it leads to very ugly and verbose use of generics.
*/
public FileSourceSplit updateWithCheckpointedPosition(@Nullable CheckpointedPosition position) {
return new FileSourceSplit(
id, filePath, offset, length, fileModificationTime, fileSize, hostnames, position);
} | 3.68 |
hadoop_LoggingAuditor_activate | /**
* Activate: log at TRACE.
* @return this span.
*/
@Override
public AuditSpanS3A activate() {
LOG.trace("[{}] {} Activate {}",
currentThreadID(), getSpanId(), getDescription());
return this;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations16 | /**
* @return expected SQL for math operation 16
*/
protected String expectedSqlForMathOperations16() {
return "a + b + c / 2 + z";
} | 3.68 |
framework_Navigator_getStateManager | /**
* Returns the {@link NavigationStateManager} that is used to get, listen to
* and manipulate the navigation state used by this Navigator.
*
* @return NavigationStateManager in use
*/
protected NavigationStateManager getStateManager() {
return stateManager;
} | 3.68 |
hbase_HRegionServer_checkCodecs | /**
 * Runs a test on the configured codecs to make sure the supporting libraries are in place.
*/
private static void checkCodecs(final Configuration c) throws IOException {
// check to see if the codec list is available:
String[] codecs = c.getStrings(REGIONSERVER_CODEC, (String[]) null);
if (codecs == null) {
return;
}
for (String codec : codecs) {
if (!CompressionTest.testCompression(codec)) {
throw new IOException(
"Compression codec " + codec + " not supported, aborting RS construction");
}
}
} | 3.68 |
morf_FieldLiteral_getValue | /**
* @return the value
*/
public String getValue() {
return value;
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_enqueueFrame | /**
* Add a frame to be sent via flow control.
* @param state The state associated with the stream which the {@code frame} is associated with.
* @param frame the frame to enqueue.
* @throws Http2Exception If a writability error occurs.
*/
void enqueueFrame(FlowState state, FlowControlled frame) throws Http2Exception {
state.enqueueFrame(frame);
} | 3.68 |
flink_AbstractRowTimeUnboundedPrecedingOver_insertToSortedList | /**
 * Inserts timestamps in order into a linked list. If timestamps arrive in order (as is the case
 * when using the RocksDB state backend) this is just an O(1) append.
*/
private void insertToSortedList(Long recordTimestamp) {
ListIterator<Long> listIterator = sortedTimestamps.listIterator(sortedTimestamps.size());
boolean isContinue = true;
while (listIterator.hasPrevious() && isContinue) {
Long timestamp = listIterator.previous();
if (recordTimestamp >= timestamp) {
listIterator.next();
listIterator.add(recordTimestamp);
isContinue = false;
}
}
if (isContinue) {
sortedTimestamps.addFirst(recordTimestamp);
}
} | 3.68 |
graphhopper_VectorTile_clearKeys | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public Builder clearKeys() {
keys_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
} | 3.68 |
dubbo_StringUtils_removeEnd | /**
* <p>Removes a substring only if it is at the end of a source string,
* otherwise returns the source string.</p>
*
* <p>A {@code null} source string will return {@code null}.
* An empty ("") source string will return the empty string.
* A {@code null} search string will return the source string.</p>
*
* <pre>
* StringUtils.removeEnd(null, *) = null
* StringUtils.removeEnd("", *) = ""
* StringUtils.removeEnd(*, null) = *
* StringUtils.removeEnd("www.domain.com", ".com.") = "www.domain.com"
* StringUtils.removeEnd("www.domain.com", ".com") = "www.domain"
* StringUtils.removeEnd("www.domain.com", "domain") = "www.domain.com"
* StringUtils.removeEnd("abc", "") = "abc"
* </pre>
*
* @param str the source String to search, may be null
* @param remove the String to search for and remove, may be null
* @return the substring with the string removed if found,
* {@code null} if null String input
*/
public static String removeEnd(final String str, final String remove) {
if (isAnyEmpty(str, remove)) {
return str;
}
if (str.endsWith(remove)) {
return str.substring(0, str.length() - remove.length());
}
return str;
} | 3.68 |
hadoop_SingleFilePerBlockCache_blocks | /**
* Gets the blocks in this cache.
*/
@Override
public Iterable<Integer> blocks() {
return Collections.unmodifiableList(new ArrayList<>(blocks.keySet()));
} | 3.68 |
framework_VScrollTable_handleTouchEvent | /**
* Special handler for touch devices that support native scrolling
*
* @return Whether the event was handled by this method.
*/
private boolean handleTouchEvent(final Event event) {
boolean touchEventHandled = false;
if (enabled && HAS_NATIVE_TOUCH_SCROLLLING) {
touchContextProvider.handleTouchEvent(event);
final Element targetTdOrTr = getEventTargetTdOrTr(event);
final int type = event.getTypeInt();
switch (type) {
case Event.ONTOUCHSTART:
touchEventHandled = true;
touchStart = event;
isDragging = false;
Touch touch = event.getChangedTouches().get(0);
// save position to fields, touches in events are same
// instance during the operation.
touchStartX = touch.getClientX();
touchStartY = touch.getClientY();
if (dragmode != 0) {
if (dragTouchTimeout == null) {
dragTouchTimeout = new Timer() {
@Override
public void run() {
if (touchStart != null) {
// Start a drag if a finger is held
// in place long enough, then moved
isDragging = true;
}
}
};
}
dragTouchTimeout.schedule(TOUCHSCROLL_TIMEOUT);
}
if (actionKeys != null) {
if (contextTouchTimeout == null) {
contextTouchTimeout = new Timer() {
@Override
public void run() {
if (touchStart != null) {
// Open the context menu if finger
// is held in place long enough.
showContextMenu(touchStart);
event.preventDefault();
touchStart = null;
}
}
};
}
contextTouchTimeout
.schedule(TOUCH_CONTEXT_MENU_TIMEOUT);
event.stopPropagation();
}
break;
case Event.ONTOUCHMOVE:
touchEventHandled = true;
if (isSignificantMove(event)) {
if (contextTouchTimeout != null) {
// Moved finger before the context menu timer
// expired, so let the browser handle this as a
// scroll.
contextTouchTimeout.cancel();
contextTouchTimeout = null;
}
if (!isDragging && dragTouchTimeout != null) {
// Moved finger before the drag timer expired,
// so let the browser handle this as a scroll.
dragTouchTimeout.cancel();
dragTouchTimeout = null;
}
if (dragmode != 0 && touchStart != null
&& isDragging) {
event.preventDefault();
event.stopPropagation();
startRowDrag(touchStart, type, targetTdOrTr);
}
touchStart = null;
}
break;
case Event.ONTOUCHEND:
case Event.ONTOUCHCANCEL:
touchEventHandled = true;
if (contextTouchTimeout != null) {
contextTouchTimeout.cancel();
}
if (dragTouchTimeout != null) {
dragTouchTimeout.cancel();
}
if (touchStart != null) {
if (!BrowserInfo.get().isAndroid()) {
event.preventDefault();
WidgetUtil.simulateClickFromTouchEvent(
touchStart, this);
}
event.stopPropagation();
touchStart = null;
}
isDragging = false;
break;
}
}
return touchEventHandled;
} | 3.68 |
AreaShop_Utils_initialize | /**
* Initialize the utilities class with constants.
* @param pluginConfig The config of the plugin
*/
public static void initialize(YamlConfiguration pluginConfig) {
config = pluginConfig;
// Setup individual identifiers
seconds = getSetAndDefaults("seconds");
minutes = getSetAndDefaults("minutes");
hours = getSetAndDefaults("hours");
days = getSetAndDefaults("days");
weeks = getSetAndDefaults("weeks");
months = getSetAndDefaults("months");
years = getSetAndDefaults("years");
// Setup all time identifiers
identifiers = new HashSet<>();
identifiers.addAll(seconds);
identifiers.addAll(minutes);
identifiers.addAll(hours);
identifiers.addAll(days);
identifiers.addAll(weeks);
identifiers.addAll(months);
identifiers.addAll(years);
suffixes = new HashMap<>();
// This should not be necessary, but it is: getConfigurationSection() does not fall back to defaults properly!
// TODO: Create a custom configuration that fixes this behavior
ConfigurationSection suffixesSection = null;
if(config.isSet("metricSymbols")) {
suffixesSection = config.getConfigurationSection("metricSymbols");
} else {
Configuration defaults = config.getDefaults();
if(defaults != null) {
suffixesSection = defaults.getConfigurationSection("metricSymbols");
}
}
if(suffixesSection != null) {
for(String key : suffixesSection.getKeys(false)) {
try {
suffixes.put(Double.parseDouble(key), suffixesSection.getString(key));
} catch(NumberFormatException e) {
Log.warn("Key '" + key + "' in the metricSymbols section of config.yml is not a number!");
}
}
}
} | 3.68 |
framework_VCalendar_distributeSize | /**
* Calculates correct size for all cells (size / amount of cells ) and
* distributes any overflow over all the cells.
*
* @param totalSize
* the total amount of size reserved for all cells
* @param numberOfCells
* the number of cells
* @param sizeModifier
* a modifier which is applied to all cells before distributing
* the overflow
* @return an integer array that contains the correct size for each cell
*/
public static int[] distributeSize(int totalSize, int numberOfCells,
int sizeModifier) {
int[] cellSizes = new int[numberOfCells];
int startingSize = totalSize / numberOfCells;
int cellSizeOverFlow = totalSize % numberOfCells;
for (int i = 0; i < numberOfCells; i++) {
cellSizes[i] = startingSize + sizeModifier;
}
// distribute size overflow amongst all slots
int j = 0;
while (cellSizeOverFlow > 0) {
cellSizes[j]++;
cellSizeOverFlow--;
j++;
if (j >= numberOfCells) {
j = 0;
}
}
// cellSizes[numberOfCells - 1] += cellSizeOverFlow;
return cellSizes;
} | 3.68 |
pulsar_ClientConfiguration_getConcurrentLookupRequest | /**
 * Get the configured total number of allowed concurrent lookup requests.
 *
 * @return the maximum number of concurrent lookup requests
*/
public int getConcurrentLookupRequest() {
return confData.getConcurrentLookupRequest();
} | 3.68 |
hbase_Client_post | /**
* Send a POST request
* @param cluster the cluster definition
* @param path the path or URI
* @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content the content bytes
* @return a Response object with response detail
*/
public Response post(Cluster cluster, String path, Header[] headers, byte[] content)
throws IOException {
HttpPost method = new HttpPost(path);
try {
method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length));
HttpResponse resp = execute(cluster, method, headers, path);
headers = resp.getAllHeaders();
content = getResponseBody(resp);
return new Response(resp.getStatusLine().getStatusCode(), headers, content);
} finally {
method.releaseConnection();
}
} | 3.68 |
hadoop_MawoConfiguration_getTeardownWorkerValidityInterval | /**
* Get Teardown worker validity interval.
* @return value of master.teardown-worker.validity-interval.ms
*/
public long getTeardownWorkerValidityInterval() {
return Long.parseLong(configsMap.get(
MASTER_TEARDOWN_WORKER_VALIDITY_INTERVAL_MS));
} | 3.68 |
framework_ReflectTools_setJavaFieldValue | /**
* Sets the value of a java field.
* <p>
* Uses setter if present, otherwise tries to access even private fields
* directly.
*
* @param object
* The object containing the field
* @param field
* The field we want to set the value for
* @param value
* The value to set
* @throws IllegalAccessException
* If the value could not be assigned to the field
* @throws IllegalArgumentException
* If the value could not be assigned to the field
* @throws InvocationTargetException
* If the value could not be assigned to the field
*/
public static void setJavaFieldValue(Object object, Field field,
Object value) throws IllegalAccessException,
IllegalArgumentException, InvocationTargetException {
PropertyDescriptor pd;
try {
pd = new PropertyDescriptor(field.getName(), object.getClass());
Method setter = pd.getWriteMethod();
if (setter != null) {
// Exceptions are thrown forward if this fails
setter.invoke(object, value);
}
} catch (IntrospectionException e1) {
// Ignore this and try to set directly using the field
}
// Try to set the value directly to the field or throw an exception
if (!field.isAccessible()) {
// Try to gain access even if field is private
field.setAccessible(true);
}
field.set(object, value);
} | 3.68 |
framework_LegacyApplication_setLogoutURL | /**
 * Sets the URL the user is redirected to on application close. If the URL is
 * <code>null</code>, the application is closed normally as defined by the
 * application running environment: a desktop application just closes the
 * application window, and a web application redirects the browser to the
 * application's main URL.
*
* @param logoutURL
* the logoutURL to set.
*/
public void setLogoutURL(String logoutURL) {
this.logoutURL = logoutURL;
} | 3.68 |
hbase_BlockingRpcConnection_waitForWork | /*
 * Wait till someone signals us to start reading an RPC response, or until the connection is idle
 * too long, is marked to be closed, or the client is marked as not running.
* @return true if it is time to read a response; false otherwise.
*/
private synchronized boolean waitForWork() {
// beware of the concurrent access to the calls list: we can add calls, but as well
// remove them.
long waitUntil = EnvironmentEdgeManager.currentTime() + this.rpcClient.minIdleTimeBeforeClose;
for (;;) {
if (thread == null) {
return false;
}
if (!calls.isEmpty()) {
return true;
}
if (EnvironmentEdgeManager.currentTime() >= waitUntil) {
closeConn(
new IOException("idle connection closed with " + calls.size() + " pending request(s)"));
return false;
}
try {
wait(Math.min(this.rpcClient.minIdleTimeBeforeClose, 1000));
} catch (InterruptedException e) {
// Restore interrupt status
Thread.currentThread().interrupt();
}
}
} | 3.68 |
flink_TimestampsAndWatermarksTransformation_getWatermarkStrategy | /** Returns the {@code WatermarkStrategy} to use. */
public WatermarkStrategy<IN> getWatermarkStrategy() {
return watermarkStrategy;
} | 3.68 |
framework_VaadinService_requestEnd | /**
* Called after the framework has handled a request and the response has
* been written.
*
* @param request
* The request object
* @param response
* The response object
* @param session
* The session which was used during the request or null if the
* request did not use a session
*/
public void requestEnd(VaadinRequest request, VaadinResponse response,
VaadinSession session) {
if (session != null) {
assert VaadinSession.getCurrent() == session;
session.lock();
try {
cleanupSession(session);
final long duration = (System.nanoTime() - (Long) request
.getAttribute(REQUEST_START_TIME_ATTRIBUTE)) / 1000000;
session.setLastRequestDuration(duration);
} finally {
session.unlock();
}
}
CurrentInstance.clearAll();
} | 3.68 |
hbase_FuzzyRowFilter_preprocessMask | /**
 * We need to preprocess the mask array, since we treat 2s as unfixed positions and -1 (0xff) as
 * fixed positions.
* @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (!UNSAFE_UNALIGNED) {
// do nothing
return mask;
}
if (isPreprocessedMask(mask)) return mask;
for (int i = 0; i < mask.length; i++) {
if (mask[i] == 0) {
mask[i] = -1; // 0 -> -1
} else if (mask[i] == 1) {
mask[i] = 2;// 1 -> 2
}
}
return mask;
} | 3.68 |
hadoop_HttpReferrerAuditHeader_buildHttpReferrer | /**
* Build the referrer string.
* This includes dynamically evaluating all of the evaluated
* attributes.
* If there is an error creating the string it will be logged once
* per entry, and "" returned.
* @return a referrer string or ""
*/
public String buildHttpReferrer() {
String header;
try {
String queries;
// Update any params which are dynamically evaluated
evaluated.forEach((key, eval) ->
addAttribute(key, eval.get()));
// now build the query parameters from all attributes, static and
// evaluated, stripping out any from the filter
queries = attributes.entrySet().stream()
.filter(e -> !filter.contains(e.getKey()))
.map(e -> e.getKey() + "=" + e.getValue())
.collect(Collectors.joining("&"));
final URI uri = new URI("https", REFERRER_ORIGIN_HOST,
String.format(Locale.ENGLISH, REFERRER_PATH_FORMAT,
contextId, spanId, operationName),
queries,
null);
header = uri.toASCIIString();
} catch (URISyntaxException e) {
WARN_OF_URL_CREATION.warn("Failed to build URI for auditor: " + e, e);
header = "";
}
return header;
} | 3.68 |
open-banking-gateway_Xs2aConsentInfo_isMultipleScaAvailable | /**
 * Is the current consent authorization using multiple SCA methods (SMS, email, etc.)?
*/
public boolean isMultipleScaAvailable(Xs2aContext ctx) {
return null != ctx.getAvailableSca() && !ctx.getAvailableSca().isEmpty();
} | 3.68 |
querydsl_ColumnMetadata_getDigits | /**
* the number of fractional digits
*
* @return digits
*/
public int getDigits() {
return decimalDigits;
} | 3.68 |
hadoop_AbstractS3ACommitter_getRole | /**
* Used in logging and reporting to help disentangle messages.
* @return the committer's role.
*/
protected String getRole() {
return role;
} | 3.68 |
framework_WebBrowser_isFirefox | /**
* Tests whether the user is using Firefox.
*
* @return true if the user is using Firefox, false if the user is not using
* Firefox or if no information on the browser is present
*/
public boolean isFirefox() {
if (browserDetails == null) {
return false;
}
return browserDetails.isFirefox();
} | 3.68 |
morf_TableLoader_load | /**
* Load the specified records into the table.
*
* @param records the records to load.
*/
public void load(final Iterable<Record> records) {
if (truncateBeforeLoad) {
truncate(table);
}
insertOrMergeRecords(records);
} | 3.68 |
flink_ArrayColumnReader_collectDataFromParquetPage | /**
* Collects data from a parquet page and returns the final row index where it stopped. The
* returned index can be equal to or less than total.
*
* @param total maximum number of rows to collect
* @param lcv column vector to do initial setup in data collection time
* @param valueList collection of values that will be fed into the vector later
* @param type the element type of array
* @return int
*/
private int collectDataFromParquetPage(
int total, HeapArrayVector lcv, List<Object> valueList, LogicalType type) {
int index = 0;
/*
* Here is a nested loop for collecting all values from a parquet page.
* A column of array type can be considered as a list of lists, so the two loops are as below:
* 1. The outer loop iterates on rows (index is a row index, so points to a row in the batch), e.g.:
* [0, 2, 3] <- index: 0
* [NULL, 3, 4] <- index: 1
*
* 2. The inner loop iterates on values within a row (sets all data from parquet data page
* for an element in ListColumnVector), so fetchNextValue returns values one-by-one:
* 0, 2, 3, NULL, 3, 4
*
* As described below, the repetition level (repetitionLevel != 0)
* can be used to decide when we'll start to read values for the next list.
*/
while (!eof && index < total) {
// add element to ListColumnVector one by one
lcv.getOffsets()[index] = valueList.size();
/*
* Let's collect all values for a single list.
* Repetition level = 0 means that a new list started there in the parquet page,
* in that case, let's exit from the loop, and start to collect value for a new list.
*/
do {
/*
* Definition level = 0 when a NULL value was returned instead of a list
 * (this is not the same as a NULL value inside a list).
*/
if (definitionLevel == 0) {
lcv.setNullAt(index);
}
valueList.add(
isCurrentPageDictionaryEncoded
? dictionaryDecodeValue(type, (Integer) lastValue)
: lastValue);
} while (fetchNextValue(type) && (repetitionLevel != 0));
lcv.getLengths()[index] = valueList.size() - lcv.getOffsets()[index];
index++;
}
return index;
} | 3.68 |
hbase_MemStoreCompactor_stop | /**
 * Requests cancellation of the asynchronous compaction task. The compaction may still happen if
 * the request was sent too late. This is a non-blocking request.
*/
public void stop() {
isInterrupted.compareAndSet(false, true);
} | 3.68 |
flink_CompletedOperationCache_finishOperation | /**
* Creates a new instance of the tracker with the result of the asynchronous operation set.
*/
public ResultAccessTracker<R> finishOperation(final OperationResult<R> operationResult) {
checkState(!this.operationResult.isFinished());
return new ResultAccessTracker<>(checkNotNull(operationResult), this.accessed);
} | 3.68 |
framework_GridDragSource_getGrid | /**
* Gets the grid this extension has been attached to.
*
* @return the grid for this extension
* @since 8.2
*/
public Grid<T> getGrid() {
return getParent();
} | 3.68 |
morf_SqlServerDialect_preInsertWithPresetAutonumStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#preInsertWithPresetAutonumStatements(org.alfasoftware.morf.metadata.Table, boolean)
*/
@Override
public Collection<String> preInsertWithPresetAutonumStatements(Table table, boolean insertingUnderAutonumLimit) {
if (getAutoIncrementColumnForTable(table) != null) {
return Arrays.asList("SET IDENTITY_INSERT " + schemaNamePrefix() + table.getName() + " ON");
} else {
return SqlDialect.NO_STATEMENTS;
}
} | 3.68 |
dubbo_InternalThreadLocal_onRemoval | /**
* Invoked when this thread local variable is removed by {@link #remove()}.
*/
protected void onRemoval(@SuppressWarnings("unused") V value) throws Exception {
} | 3.68 |
hadoop_TaskManifest_getDestDirectories | /**
* All the destination directories.
* @return directory list.
*/
public List<DirEntry> getDestDirectories() {
return destDirectories;
} | 3.68 |
hbase_MonitoredRPCHandlerImpl_getRPCPacketLength | /**
 * Returns the size of the RPC packet currently being serviced by this Handler.
 * @return The length in bytes of the RPC packet, or -1 if no RPC is currently running.
*/
@Override
public long getRPCPacketLength() {
if (getState() != State.RUNNING || packet == null) {
// no RPC is currently running, or we don't have an RPC's packet info
return -1L;
}
return packet.getSerializedSize();
} | 3.68 |
hbase_Bytes_toBoolean | /**
* Reverses {@link #toBytes(boolean)}
* @param b array
* @return True or false.
*/
public static boolean toBoolean(final byte[] b) {
if (b.length != 1) {
throw new IllegalArgumentException("Array has wrong size: " + b.length);
}
return b[0] != (byte) 0;
} | 3.68 |
hbase_TimeRange_getMin | /** Returns the smallest timestamp that should be considered */
public long getMin() {
return minStamp;
} | 3.68 |
hadoop_StateStoreUtils_getRecordName | /**
* Get the base class name for a record. If we get an implementation of a
* record we will return the real parent record class.
*
* @param <T> Type of the class of the data record.
* @param clazz Class of the data record to check.
* @return Name of the base class for the record.
*/
public static <T extends BaseRecord> String getRecordName(
final Class<T> clazz) {
return getRecordClass(clazz).getSimpleName();
} | 3.68 |
querydsl_BeanPath_createCollection | /**
* Create a new Collection typed path
*
 * @param <A> element type
 * @param property property name
 * @param type property type
 * @param queryType expression query type
 * @param inits path initializations
 * @return property path
*/
@SuppressWarnings("unchecked")
protected <A, Q extends SimpleExpression<? super A>> CollectionPath<A, Q> createCollection(String property, Class<? super A> type, Class<? super Q> queryType, PathInits inits) {
return add(new CollectionPath<A, Q>(type, (Class) queryType, forProperty(property), inits));
} | 3.68 |
pulsar_EventLoopUtil_shutdownGracefully | /**
 * Shuts down the EventLoopGroup gracefully. Returns a {@link CompletableFuture}.
* @param eventLoopGroup the event loop to shutdown
* @return CompletableFuture that completes when the shutdown has completed
*/
public static CompletableFuture<Void> shutdownGracefully(EventLoopGroup eventLoopGroup) {
return NettyFutureUtil.toCompletableFutureVoid(eventLoopGroup.shutdownGracefully());
} | 3.68 |
querydsl_BeanMap_entryIterator | /**
* Convenience method for getting an iterator over the entries.
*
* @return an iterator over the entries
*/
public Iterator<Entry<String, Object>> entryIterator() {
final Iterator<String> iter = keyIterator();
return new Iterator<Entry<String, Object>>() {
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public Entry<String, Object> next() {
String key = iter.next();
Object value = get(key);
return new MyMapEntry(BeanMap.this, key, value);
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove() not supported for BeanMap");
}
};
} | 3.68 |
framework_VDebugWindow_init | /**
* Called when the window is initialized.
*/
public void init() {
show();
/*
* Finalize initialization when all entry points have had the chance to
* e.g. register new sections.
*/
Scheduler.get().scheduleFinally(() -> {
readStoredState();
Window.addResizeHandler(
new com.google.gwt.event.logical.shared.ResizeHandler() {
Timer t = new Timer() {
@Override
public void run() {
applyPositionAndSize();
}
};
@Override
public void onResize(ResizeEvent event) {
t.cancel();
// TODO less
t.schedule(1000);
}
});
});
} | 3.68 |
pulsar_NettyFutureUtil_toCompletableFutureVoid | /**
* Converts a Netty {@link Future} to {@link CompletableFuture} with Void type.
*
* @param future Netty future
* @return converted future instance
*/
public static CompletableFuture<Void> toCompletableFutureVoid(Future<?> future) {
Objects.requireNonNull(future, "future cannot be null");
CompletableFuture<Void> adapter = new CompletableFuture<>();
if (future.isDone()) {
if (future.isSuccess()) {
adapter.complete(null);
} else {
adapter.completeExceptionally(future.cause());
}
} else {
future.addListener(f -> {
if (f.isSuccess()) {
adapter.complete(null);
} else {
adapter.completeExceptionally(f.cause());
}
});
}
return adapter;
} | 3.68 |
framework_AbstractOrderedLayout_setExpandRatio | /**
* <p>
 * This method is used to control how excess space in the layout is distributed
 * among components. Excess space may exist if the layout is sized and its
 * contained, non-relatively sized components don't consume all available space.
*
* <p>
* Example how to distribute 1:3 (33%) for component1 and 2:3 (67%) for
* component2 :
*
* <code>
* layout.setExpandRatio(component1, 1);<br>
* layout.setExpandRatio(component2, 2);
* </code>
*
* <p>
* If no ratios have been set, the excess space is distributed evenly among
* all components.
*
* <p>
 * Note that the width or height (depending on orientation) needs to be defined
 * for this method to have any effect.
*
* @see Sizeable
*
* @param component
 * the component in this layout whose expand ratio is to be set
* @param ratio
* new expand ratio (greater or equal to 0)
* @throws IllegalArgumentException
* if the expand ratio is negative or the component is not a
* direct child of the layout
*/
public void setExpandRatio(Component component, float ratio) {
ChildComponentData childData = getState().childData.get(component);
if (childData == null) {
throw new IllegalArgumentException(
"The given component is not a child of this layout");
}
if (ratio < 0.0f) {
throw new IllegalArgumentException(
"Expand ratio can't be less than 0.0");
}
childData.expandRatio = ratio;
} | 3.68 |
hbase_HFileInfo_initMetaAndIndex | /**
* should be called after initTrailerAndContext
*/
public void initMetaAndIndex(HFile.Reader reader) throws IOException {
ReaderContext context = reader.getContext();
try {
HFileBlock.FSReader blockReader = reader.getUncachedBlockReader();
// Initialize a block iterator, and parse the load-on-open blocks in the following.
blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
context.getFileSize() - trailer.getTrailerSize());
// Data index. We also read statistics about the block index written after
// the root level.
HFileBlock dataBlockRootIndex = blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX);
HFileBlock metaBlockIndex = blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX);
loadMetaInfo(blockIter, hfileContext);
HFileIndexBlockEncoder indexBlockEncoder =
HFileIndexBlockEncoderImpl.createFromFileInfo(this);
this.dataIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReaderV2(
trailer.createComparator(), trailer.getNumDataIndexLevels(), indexBlockEncoder);
dataIndexReader.readMultiLevelIndexRoot(dataBlockRootIndex, trailer.getDataIndexCount());
reader.setDataBlockIndexReader(dataIndexReader);
// Meta index.
this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
metaIndexReader.readRootIndex(metaBlockIndex, trailer.getMetaIndexCount());
reader.setMetaBlockIndexReader(metaIndexReader);
reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this));
// Load-On-Open info
HFileBlock b;
while ((b = blockIter.nextBlock()) != null) {
loadOnOpenBlocks.add(b);
}
// close the block reader
context.getInputStreamWrapper().unbuffer();
} catch (Throwable t) {
IOUtils.closeQuietly(context.getInputStreamWrapper(),
e -> LOG.warn("failed to close input stream wrapper", e));
throw new CorruptHFileException(
"Problem reading data index and meta index from file " + context.getFilePath(), t);
}
} | 3.68 |
hbase_Filter_filterCell | /**
* A way to filter based on the column family, column qualifier and/or the column value. Return
 * code is described below. This allows filters to filter only a certain number of columns, then
 * terminate without matching every column. If filterRowKey returns true, filterCell needs to be
* consistent with it. filterCell can assume that filterRowKey has already been called for the
* row. If your filter returns <code>ReturnCode.NEXT_ROW</code>, it should return
* <code>ReturnCode.NEXT_ROW</code> until {@link #reset()} is called just in case the caller calls
* for the next row. Concrete implementers can signal a failure condition in their code by
* throwing an {@link IOException}.
* @param c the Cell in question
* @return code as described below
 * @throws IOException in case an I/O or a filter-specific failure needs to be signaled.
* @see Filter.ReturnCode
*/
public ReturnCode filterCell(final Cell c) throws IOException {
return ReturnCode.INCLUDE;
} | 3.68 |
morf_UpgradePath_getSql | /**
* @return the sql
*/
public List<String> getSql() {
List<String> results = Lists.newLinkedList();
if (!sql.isEmpty() || !upgradeScriptAdditions.isEmpty())
results.addAll(initialisationSql);
results.addAll(sql);
for (UpgradeScriptAddition addition : upgradeScriptAdditions) {
Iterables.addAll(results, addition.sql(connectionResources));
}
if (!results.isEmpty())
results.addAll(finalisationSql);
return Collections.unmodifiableList(results);
} | 3.68 |
flink_HiveTableUtil_extractHiveTableInfo | /**
* Get the hive table's information.
*
* @return non-part fields, part fields, notNullColumns, primaryKey.
*/
private static Tuple4<
List<FieldSchema>, List<FieldSchema>, Set<String>, Optional<UniqueConstraint>>
extractHiveTableInfo(
HiveConf hiveConf,
Table hiveTable,
HiveMetastoreClientWrapper client,
HiveShim hiveShim) {
List<FieldSchema> fields = getNonPartitionFields(hiveConf, hiveTable, hiveShim);
Set<String> notNullColumns =
client.getNotNullColumns(hiveConf, hiveTable.getDbName(), hiveTable.getTableName());
Optional<UniqueConstraint> primaryKey =
client.getPrimaryKey(
hiveTable.getDbName(),
hiveTable.getTableName(),
HiveTableUtil.relyConstraint((byte) 0));
// PK columns cannot be null
primaryKey.ifPresent(pk -> notNullColumns.addAll(pk.getColumns()));
return Tuple4.of(fields, hiveTable.getPartitionKeys(), notNullColumns, primaryKey);
} | 3.68 |
framework_VAbstractOrderedLayout_getLayoutManager | /**
* Get the layout manager used by this layout.
*
* @return the layout manager
*/
public LayoutManager getLayoutManager() {
return layoutManager;
} | 3.68 |
hbase_Result_containsColumn | /**
* Checks for existence of a value for the specified column (empty or not).
* @param family family name
* @param foffset family offset
* @param flength family length
* @param qualifier column qualifier
* @param qoffset qualifier offset
* @param qlength qualifier length
* @return true if at least one value exists in the result, false if not
*/
public boolean containsColumn(byte[] family, int foffset, int flength, byte[] qualifier,
int qoffset, int qlength) {
return getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength) != null;
} | 3.68 |
flink_JMXService_stopInstance | /** Stop the JMX server. */
public static synchronized void stopInstance() throws IOException {
if (jmxServer != null) {
jmxServer.stop();
jmxServer = null;
}
} | 3.68 |
hbase_SpaceQuotaSnapshotNotifierFactory_create | /**
* Instantiates the {@link SpaceQuotaSnapshotNotifier} implementation as defined in the
* configuration provided.
* @param conf Configuration object
* @return The SpaceQuotaSnapshotNotifier implementation
* @throws IllegalArgumentException if the class could not be instantiated
*/
public SpaceQuotaSnapshotNotifier create(Configuration conf) {
Class<? extends SpaceQuotaSnapshotNotifier> clz = Objects.requireNonNull(conf)
.getClass(SNAPSHOT_NOTIFIER_KEY, SNAPSHOT_NOTIFIER_DEFAULT, SpaceQuotaSnapshotNotifier.class);
try {
return clz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new IllegalArgumentException("Failed to instantiate the implementation", e);
}
} | 3.68 |
hbase_RestoreTool_incrementalRestoreTable | /**
 * During an incremental backup operation, calls WalPlayer to replay the WAL in the backup image.
 * Currently tableNames and newTableNames only contain a single table; this will be expanded to
 * multiple tables in the future.
* @param conn HBase connection
* @param tableBackupPath backup path
 * @param logDirs : incremental backup folders, which contain WALs
 * @param tableNames : source table names (the table names that were backed up)
 * @param newTableNames : target table names (the table names to restore to)
* @param incrBackupId incremental backup Id
* @throws IOException exception
*/
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
try (Admin admin = conn.getAdmin()) {
if (tableNames.length != newTableNames.length) {
throw new IOException("Number of source tables and target tables does not match!");
}
FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
// for incremental backup image, expect the table already created either by user or previous
// full backup. Here, check that all new tables exist
for (TableName tableName : newTableNames) {
if (!admin.tableExists(tableName)) {
throw new IOException("HBase table " + tableName
+ " does not exist. Create the table first, e.g. by restoring a full backup.");
}
}
// adjust table schema
for (int i = 0; i < tableNames.length; i++) {
TableName tableName = tableNames[i];
TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
if (tableDescriptor == null) {
throw new IOException("Can't find " + tableName + "'s descriptor.");
}
LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
TableName newTableName = newTableNames[i];
TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
List<ColumnFamilyDescriptor> existingFamilies =
Arrays.asList(newTableDescriptor.getColumnFamilies());
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
boolean schemaChangeNeeded = false;
for (ColumnFamilyDescriptor family : families) {
if (!existingFamilies.contains(family)) {
builder.setColumnFamily(family);
schemaChangeNeeded = true;
}
}
for (ColumnFamilyDescriptor family : existingFamilies) {
if (!families.contains(family)) {
builder.removeColumnFamily(family.getName());
schemaChangeNeeded = true;
}
}
if (schemaChangeNeeded) {
modifyTableSync(conn, builder.build());
LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
}
}
RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
restoreService.run(logDirs, tableNames, restoreRootDir, newTableNames, false);
}
} | 3.68 |
framework_AbstractClientConnector_getState | /**
* Returns the shared state for this connector.
*
* @param markAsDirty
* true if the connector should automatically be marked dirty,
* false otherwise
*
* @return The shared state for this connector. Never null.
* @see #getState()
*/
protected SharedState getState(boolean markAsDirty) {
assert getSession() == null
|| getSession().hasLock() : buildLockAssertMessage(
"getState()");
if (null == sharedState) {
sharedState = createState();
}
if (markAsDirty) {
UI ui = getUI();
if (ui != null && !ui.getConnectorTracker().isDirty(this)
&& !ui.getConnectorTracker().isWritingResponse()) {
ui.getConnectorTracker().markDirty(this);
}
}
return sharedState;
} | 3.68 |
hadoop_AbstractS3ACommitter_buildJobUUID | /**
* Build the job UUID.
*
* <p>
* In MapReduce jobs, the application ID is issued by YARN, and
* unique across all jobs.
* </p>
* <p>
* Spark will use a fake app ID based on the current time.
* This can lead to collisions on busy clusters unless
* the specific spark release has SPARK-33402 applied.
* This appends a random long value to the timestamp, so
* is unique enough that the risk of collision is almost
* nonexistent.
* </p>
* <p>
* The order of selection of a uuid is
* </p>
* <ol>
* <li>Value of
* {@link InternalCommitterConstants#FS_S3A_COMMITTER_UUID}.</li>
* <li>Value of
* {@link InternalCommitterConstants#SPARK_WRITE_UUID}.</li>
* <li>If enabled through
* {@link CommitConstants#FS_S3A_COMMITTER_GENERATE_UUID}:
* Self-generated uuid.</li>
* <li>If {@link CommitConstants#FS_S3A_COMMITTER_REQUIRE_UUID}
* is not set: Application ID</li>
* </ol>
* The UUID bonding takes place during construction;
* the staging committers use it to set up their wrapped
* committer to a path in the cluster FS which is unique to the
* job.
* In {@link #setupJob(JobContext)} the job context's configuration
 * will be patched to
* be valid in all sequences where the job has been set up for the
* configuration passed in.
* <p>
* If the option {@link CommitConstants#FS_S3A_COMMITTER_REQUIRE_UUID}
* is set, then an external UUID MUST be passed in.
* This can be used to verify that the spark engine is reliably setting
* unique IDs for staging.
* </p>
* @param conf job/task configuration
* @param jobId job ID from YARN or spark.
* @return Job UUID and source of it.
* @throws PathCommitException no UUID was found and it was required
*/
public static Pair<String, JobUUIDSource>
buildJobUUID(Configuration conf, JobID jobId)
throws PathCommitException {
String jobUUID = conf.getTrimmed(FS_S3A_COMMITTER_UUID, "");
if (!jobUUID.isEmpty()) {
return Pair.of(jobUUID, JobUUIDSource.CommitterUUIDProperty);
}
// there is no job UUID.
// look for one from spark
jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, "");
if (!jobUUID.isEmpty()) {
return Pair.of(jobUUID, JobUUIDSource.SparkWriteUUID);
}
// there is no UUID configuration in the job/task config
// Check the job hasn't declared a requirement for the UUID.
// This allows or fail-fast validation of Spark behavior.
if (conf.getBoolean(FS_S3A_COMMITTER_REQUIRE_UUID,
DEFAULT_S3A_COMMITTER_REQUIRE_UUID)) {
throw new PathCommitException("", E_NO_SPARK_UUID);
}
// see if the job can generate a random UUID
if (conf.getBoolean(FS_S3A_COMMITTER_GENERATE_UUID,
DEFAULT_S3A_COMMITTER_GENERATE_UUID)) {
// generate a random UUID. This is OK for a job, for a task
// it means that the data may not get picked up.
String newId = UUID.randomUUID().toString();
LOG.warn("No job ID in configuration; generating a random ID: {}",
newId);
return Pair.of(newId, JobUUIDSource.GeneratedLocally);
}
// if no other option was supplied, return the job ID.
// This is exactly what MR jobs expect, but is not what
// Spark jobs can do as there is a risk of jobID collision.
return Pair.of(jobId.toString(), JobUUIDSource.JobID);
} | 3.68 |
flink_OrcSplitReader_ensureBatch | /**
* Checks if there is at least one row left in the batch to return. If no more row are
* available, it reads another batch of rows.
*
* @return Returns true if there is one more row to return, false otherwise.
* @throws IOException throw if an exception happens while reading a batch.
*/
private boolean ensureBatch() throws IOException {
if (nextRow >= rowsInBatch) {
// Try to read the next batch of rows from the ORC file.
boolean moreRows = shim.nextBatch(orcRowsReader, rowBatchWrapper.getBatch());
if (moreRows) {
// No more rows available in the Rows array.
nextRow = 0;
// Load the data into the Rows array.
rowsInBatch = fillRows();
}
return moreRows;
}
// there is at least one Row left in the Rows array.
return true;
} | 3.68 |
hadoop_Server_getConfig | /**
* Returns the server configuration.
*
* @return the server configuration.
*/
public Configuration getConfig() {
return config;
} | 3.68 |
pulsar_ResourceGroup_getRgUsageReportedCount | // Visibility for unit testing
protected static double getRgUsageReportedCount (String rgName, String monClassName) {
return rgLocalUsageReportCount.labels(rgName, monClassName).get();
} | 3.68 |
hbase_BackupManager_initialize | /**
* Start the backup manager service.
* @throws IOException exception
*/
public void initialize() throws IOException {
String ongoingBackupId = this.getOngoingBackupId();
if (ongoingBackupId != null) {
LOG.info("There is a ongoing backup {}"
+ ". Can not launch new backup until no ongoing backup remains.", ongoingBackupId);
throw new BackupException("There is ongoing backup seesion.");
}
} | 3.68 |
framework_Tree_getTransferable | /*
* (non-Javadoc)
*
* @see com.vaadin.event.dd.DragSource#getTransferable(java.util.Map)
*/
@Override
public Transferable getTransferable(Map<String, Object> payload) {
TreeTransferable transferable = new TreeTransferable(this, payload);
// updating drag source variables
Object object = payload.get("itemId");
if (object != null) {
transferable.setData("itemId", itemIdMapper.get((String) object));
}
return transferable;
} | 3.68 |
hbase_FilterBase_createFilterFromArguments | /**
* Given the filter's arguments it constructs the filter
* <p>
* @param filterArguments the filter's arguments
* @return constructed filter object
*/
public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
throw new IllegalArgumentException("This method has not been implemented");
} | 3.68 |
hbase_CheckAndMutate_getFamily | /** Returns the family to check */
public byte[] getFamily() {
return family;
} | 3.68 |
pulsar_SimpleLoadManagerImpl_doLoadRanking | /**
* Rank brokers by available capacity, or load percentage, based on placement strategy:
*
 * - Available capacity for weighted random selection (weightedRandomSelection): ranks ResourceUnits based on an
 * estimation of their capacity, which is basically how many bundles each ResourceUnit can handle with its
 * available resources (CPU, memory, network, etc);
*
 * - Load percentage for least loaded server (leastLoadedServer): ranks ResourceUnits based on an estimation of
 * their load percentage, which is basically the percentage of resources allocated, i.e.
 * max(resource_actually_used, resource_quota)
*
* If we fail to collect the Load Reports OR fail to process them for the first time, it means the leader does not
* have enough information to make a decision so we set it to ready when we collect and process the load reports
* successfully the first time.
*/
private synchronized void doLoadRanking() {
ResourceUnitRanking.setCpuUsageByMsgRate(this.realtimeCpuLoadFactor);
String hostname = pulsar.getAdvertisedAddress();
String strategy = this.getLoadBalancerPlacementStrategy();
log.info("doLoadRanking - load balancing strategy: {}", strategy);
if (!currentLoadReports.isEmpty()) {
Map<Long, Set<ResourceUnit>> newSortedRankings = new TreeMap<>();
Map<ResourceUnit, ResourceUnitRanking> newResourceUnitRankings = new HashMap<>();
ResourceQuota defaultResourceQuota =
pulsar.getBrokerService().getBundlesQuotas().getDefaultResourceQuota().join();
for (Map.Entry<ResourceUnit, LoadReport> entry : currentLoadReports.entrySet()) {
ResourceUnit resourceUnit = entry.getKey();
LoadReport loadReport = entry.getValue();
// calculate rankings
Set<String> loadedBundles = loadReport.getBundles();
Set<String> preAllocatedBundles = null;
if (resourceUnitRankings.containsKey(resourceUnit)) {
preAllocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles();
preAllocatedBundles.removeAll(loadedBundles);
} else {
preAllocatedBundles = new HashSet<>();
}
ResourceQuota allocatedQuota = getTotalAllocatedQuota(loadedBundles);
ResourceQuota preAllocatedQuota = getTotalAllocatedQuota(preAllocatedBundles);
ResourceUnitRanking ranking = new ResourceUnitRanking(loadReport.getSystemResourceUsage(),
loadedBundles, allocatedQuota, preAllocatedBundles, preAllocatedQuota);
newResourceUnitRankings.put(resourceUnit, ranking);
// generated sorted ranking
double loadPercentage = ranking.getEstimatedLoadPercentage();
long maxCapacity = ranking.estimateMaxCapacity(defaultResourceQuota);
long finalRank = 0;
if (strategy.equals(LOADBALANCER_STRATEGY_LLS)) {
finalRank = (long) loadPercentage;
} else if (strategy.equals(LOADBALANCER_STRATEGY_LEAST_MSG)) {
finalRank = (long) ranking.getEstimatedMessageRate();
} else {
double idleRatio = (100 - loadPercentage) / 100;
finalRank = (long) (maxCapacity * idleRatio * idleRatio);
}
newSortedRankings.computeIfAbsent(finalRank, k -> new HashSet<>())
.add(entry.getKey());
if (log.isDebugEnabled()) {
log.debug("Added Resource Unit [{}] with Rank [{}]", entry.getKey().getResourceId(), finalRank);
}
// update metrics
if (resourceUnit.getResourceId().contains(hostname)) {
updateLoadBalancingMetrics(hostname, finalRank, ranking);
}
}
updateBrokerToNamespaceToBundle();
this.sortedRankings.set(newSortedRankings);
this.resourceUnitRankings = newResourceUnitRankings;
} else {
log.info("Leader broker[{}] No ResourceUnits to rank this run, Using Old Ranking",
pulsar.getSafeWebServiceAddress());
}
} | 3.68 |
hbase_MasterRpcServices_checkCoprocessorWithService | /**
* Determines if there is a coprocessor implementation in the provided argument which extends or
* implements the provided {@code service}.
*/
boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck,
Class<?> service) {
if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
return false;
}
for (MasterCoprocessor cp : coprocessorsToCheck) {
if (service.isAssignableFrom(cp.getClass())) {
return true;
}
}
return false;
} | 3.68 |
dubbo_FrameworkExecutorRepository_getConnectivityScheduledExecutor | /**
* Scheduled executor handle connectivity check task
*
* @return ScheduledExecutorService
*/
public ScheduledExecutorService getConnectivityScheduledExecutor() {
return connectivityScheduledExecutor;
} | 3.68 |
hudi_HoodieRecord_setCurrentLocation | /**
 * Sets the current location of the record. This should happen exactly once.
*/
public HoodieRecord setCurrentLocation(HoodieRecordLocation location) {
checkState();
assert currentLocation == null;
this.currentLocation = location;
return this;
} | 3.68 |
hbase_UnsafeAccess_putByte | /**
 * Put a byte value out at the specified ByteBuffer position.
* @param buf the byte buffer
* @param offset position in the buffer
* @param b byte to write out
* @return incremented offset
*/
public static int putByte(ByteBuffer buf, int offset, byte b) {
if (buf.isDirect()) {
HBasePlatformDependent.putByte(directBufferAddress(buf) + offset, b);
} else {
HBasePlatformDependent.putByte(buf.array(),
BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset, b);
}
return offset + 1;
} | 3.68 |
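A small demo of the offset-chaining contract above, using a plain java.nio.ByteBuffer in place of the unsafe-backed writes (the helper here is a stand-in, not the HBase class):

import java.nio.ByteBuffer;

public class PutByteDemo {
    // Stand-in with the same contract: absolute write at offset, return the incremented offset.
    static int putByte(ByteBuffer buf, int offset, byte b) {
        buf.put(offset, b);              // absolute put; the buffer's position is not moved
        return offset + 1;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        int offset = 0;
        offset = putByte(buf, offset, (byte) 0x01);
        offset = putByte(buf, offset, (byte) 0x02);
        System.out.println(offset);      // 2
        System.out.println(buf.get(0));  // 1
        System.out.println(buf.get(1));  // 2
    }
}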
flink_NetUtils_unresolvedHostToNormalizedString | /**
* Returns an address in a normalized format for Pekko. When an IPv6 address is specified, it
* normalizes the IPv6 address to avoid complications with the exact URL match policy of Pekko.
*
* @param host The hostname, IPv4 or IPv6 address
* @return host which will be normalized if it is an IPv6 address
*/
public static String unresolvedHostToNormalizedString(String host) {
// Return loopback interface address if host is null
// This represents the behavior of {@code InetAddress.getByName } and RFC 3330
if (host == null) {
host = InetAddress.getLoopbackAddress().getHostAddress();
} else {
host = host.trim().toLowerCase();
if (host.startsWith("[") && host.endsWith("]")) {
String address = host.substring(1, host.length() - 1);
if (InetAddresses.isInetAddress(address)) {
host = address;
}
}
}
        // normalize the address if it is a valid IP literal
if (InetAddresses.isInetAddress(host)) {
InetAddress inetAddress = InetAddresses.forString(host);
if (inetAddress instanceof Inet6Address) {
byte[] ipV6Address = inetAddress.getAddress();
host = getIPv6UrlRepresentation(ipV6Address);
}
} else {
try {
// We don't allow these in hostnames
Preconditions.checkArgument(!host.startsWith("."));
Preconditions.checkArgument(!host.endsWith("."));
Preconditions.checkArgument(!host.contains(":"));
} catch (Exception e) {
throw new IllegalConfigurationException("The configured hostname is not valid", e);
}
}
return host;
} | 3.68 |
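A usage sketch, assuming Flink's public org.apache.flink.util.NetUtils and without asserting the exact IPv6 output format:

import org.apache.flink.util.NetUtils;

public class HostNormalizationDemo {
    public static void main(String[] args) {
        // Hostnames are trimmed and lower-cased.
        System.out.println(NetUtils.unresolvedHostToNormalizedString(" Flink-Host "));
        // Bracketed IPv6 literals are unwrapped and printed in the compressed URL representation.
        System.out.println(
                NetUtils.unresolvedHostToNormalizedString("[2001:0db8:0000:0000:0000:0000:0000:0001]"));
    }
}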
rocketmq-connect_JdbcDriverInfo_jdbcMajorVersion | /**
* Get the major version of the JDBC specification supported by the driver.
*
* @return the major version number
*/
public int jdbcMajorVersion() {
return jdbcMajorVersion;
} | 3.68 |
zxing_PDF417HighLevelEncoder_encodeBinary | /**
* Encode parts of the message using Byte Compaction as described in ISO/IEC 15438:2001(E),
* chapter 4.4.3. The Unicode characters will be converted to binary using the cp437
* codepage.
*
* @param bytes the message converted to a byte array
* @param startpos the start position within the message
* @param count the number of bytes to encode
* @param startmode the mode from which this method starts
* @param sb receives the encoded codewords
*/
private static void encodeBinary(byte[] bytes,
int startpos,
int count,
int startmode,
StringBuilder sb) {
if (count == 1 && startmode == TEXT_COMPACTION) {
sb.append((char) SHIFT_TO_BYTE);
} else {
if ((count % 6) == 0) {
sb.append((char) LATCH_TO_BYTE);
} else {
sb.append((char) LATCH_TO_BYTE_PADDED);
}
}
int idx = startpos;
// Encode sixpacks
if (count >= 6) {
char[] chars = new char[5];
while ((startpos + count - idx) >= 6) {
long t = 0;
for (int i = 0; i < 6; i++) {
t <<= 8;
t += bytes[idx + i] & 0xff;
}
for (int i = 0; i < 5; i++) {
chars[i] = (char) (t % 900);
t /= 900;
}
for (int i = chars.length - 1; i >= 0; i--) {
sb.append(chars[i]);
}
idx += 6;
}
}
    // Encode the rest (remaining n < 6 bytes, if any)
for (int i = idx; i < startpos + count; i++) {
int ch = bytes[i] & 0xff;
sb.append((char) ch);
}
} | 3.68 |
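A standalone rendition of the sixpack conversion above: six bytes are packed into a 48-bit big-endian integer and re-expressed as five base-900 codewords, emitted most significant first (the input bytes are arbitrary sample data):

public class SixpackDemo {
    public static void main(String[] args) {
        byte[] bytes = {0x41, 0x42, 0x43, 0x44, 0x45, 0x46}; // "ABCDEF"
        long t = 0;
        for (int i = 0; i < 6; i++) {
            t = (t << 8) + (bytes[i] & 0xff);                // pack 6 bytes, big-endian
        }
        int[] codewords = new int[5];
        for (int i = 0; i < 5; i++) {
            codewords[i] = (int) (t % 900);                  // least significant base-900 digit first
            t /= 900;
        }
        for (int i = 4; i >= 0; i--) {
            System.out.print(codewords[i] + " ");            // emit most significant first
        }
    }
}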
flink_RouteResult_uri | /** Returns the original request URI. */
public String uri() {
return uri;
} | 3.68 |
hbase_BlockCacheUtil_shouldReplaceExistingCacheBlock | /**
 * Because of region splitting, it's possible that the split key falls in the middle of a block, so both
 * daughter regions may load the same block from their parent HFile. When doing a pread, we don't force the
 * read to read all of the next block header, so when two threads try to cache the same block, it's possible
 * that one thread read all of the next block header while the other did not. If the already cached block
 * does not have the next block header but the new block to cache does, we can replace the existing block
 * with the new block for better performance. (HBASE-20447)
* @param blockCache BlockCache to check
* @param cacheKey the block cache key
* @param newBlock the new block which try to put into the block cache.
* @return true means need to replace existing block with new block for the same block cache key.
* false means just keep the existing block.
*/
public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache,
BlockCacheKey cacheKey, Cacheable newBlock) {
// NOTICE: The getBlock has retained the existingBlock inside.
Cacheable existingBlock = blockCache.getBlock(cacheKey, false, false, false);
if (existingBlock == null) {
return true;
}
try {
int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, newBlock, cacheKey);
if (comparison < 0) {
LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the new block has "
+ "nextBlockOnDiskSize set. Caching new block.");
return true;
} else if (comparison > 0) {
LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the existing block has "
+ "nextBlockOnDiskSize set, Keeping cached block.");
return false;
} else {
LOG.debug("Caching an already cached block: {}. This is harmless and can happen in rare "
+ "cases (see HBASE-8547)", cacheKey);
return false;
}
} finally {
// Release this block to decrement the reference count.
existingBlock.release();
}
} | 3.68 |
hudi_AvroInternalSchemaConverter_visitInternalPrimitiveToBuildAvroPrimitiveType | /**
 * Converts a Hudi PrimitiveType to the corresponding Avro primitive type.
 * This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalPrimitiveToBuildAvroPrimitiveType(Type.PrimitiveType primitive, String recordName) {
switch (primitive.typeId()) {
case BOOLEAN:
return Schema.create(Schema.Type.BOOLEAN);
case INT:
return Schema.create(Schema.Type.INT);
case LONG:
return Schema.create(Schema.Type.LONG);
case FLOAT:
return Schema.create(Schema.Type.FLOAT);
case DOUBLE:
return Schema.create(Schema.Type.DOUBLE);
case DATE:
return LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT));
case TIME:
return LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG));
case TIMESTAMP:
return LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG));
case STRING:
return Schema.create(Schema.Type.STRING);
case BINARY:
return Schema.create(Schema.Type.BYTES);
case UUID: {
// NOTE: All schemas corresponding to Avro's type [[FIXED]] are generated
// with the "fixed" name to stay compatible w/ [[SchemaConverters]]
String name = recordName + AVRO_NAME_DELIMITER + "fixed";
Schema fixedSchema = Schema.createFixed(name, null, null, 16);
return LogicalTypes.uuid().addToSchema(fixedSchema);
}
case FIXED: {
Types.FixedType fixed = (Types.FixedType) primitive;
// NOTE: All schemas corresponding to Avro's type [[FIXED]] are generated
// with the "fixed" name to stay compatible w/ [[SchemaConverters]]
String name = recordName + AVRO_NAME_DELIMITER + "fixed";
return Schema.createFixed(name, null, null, fixed.getFixedSize());
}
case DECIMAL: {
Types.DecimalType decimal = (Types.DecimalType) primitive;
// NOTE: All schemas corresponding to Avro's type [[FIXED]] are generated
// with the "fixed" name to stay compatible w/ [[SchemaConverters]]
String name = recordName + AVRO_NAME_DELIMITER + "fixed";
Schema fixedSchema = Schema.createFixed(name,
null, null, computeMinBytesForPrecision(decimal.precision()));
return LogicalTypes.decimal(decimal.precision(), decimal.scale())
.addToSchema(fixedSchema);
}
default:
throw new UnsupportedOperationException(
"Unsupported type ID: " + primitive.typeId());
}
} | 3.68 |
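An illustration of the DECIMAL branch above using Avro's schema API directly; the record name and the 5-byte fixed size (a valid minimum for precision 10) are assumptions, since computeMinBytesForPrecision is not shown here:

import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;

public class DecimalSchemaDemo {
    public static void main(String[] args) {
        // A fixed-size Avro schema carrying a decimal(10, 2) logical type.
        Schema fixed = Schema.createFixed("demo_record.fixed", null, null, 5);
        Schema decimal = LogicalTypes.decimal(10, 2).addToSchema(fixed);
        System.out.println(decimal.toString(true));
    }
}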
flink_EmbeddedRocksDBStateBackend_setDbStoragePath | /**
* Sets the path where the RocksDB local database files should be stored on the local file
* system. Setting this path overrides the default behavior, where the files are stored across
* the configured temp directories.
*
* <p>Passing {@code null} to this function restores the default behavior, where the configured
* temp directories will be used.
*
* @param path The path where the local RocksDB database files are stored.
*/
public void setDbStoragePath(String path) {
setDbStoragePaths(path == null ? null : new String[] {path});
} | 3.68 |
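A short usage sketch, assuming the flink-statebackend-rocksdb dependency is on the classpath; the path is just an example:

import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;

public class RocksDbStoragePathDemo {
    public static void main(String[] args) {
        EmbeddedRocksDBStateBackend backend = new EmbeddedRocksDBStateBackend();
        backend.setDbStoragePath("/mnt/fast-local-disk/rocksdb"); // keep working files on a dedicated disk
        backend.setDbStoragePath(null);                           // restore the default (configured temp dirs)
    }
}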
flink_ConfigOptions_key | /**
* Starts building a new {@link ConfigOption}.
*
* @param key The key for the config option.
* @return The builder for the config option with the given key.
*/
public static OptionBuilder key(String key) {
checkNotNull(key);
return new OptionBuilder(key);
} | 3.68 |
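A typical use of the returned builder, assuming a Flink version that offers the typed builder methods (intType/defaultValue); the key name is made up:

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

public class ConfigOptionDemo {
    static final ConfigOption<Integer> RETRY_COUNT =
            ConfigOptions.key("myapp.retry-count") // hypothetical key
                    .intType()
                    .defaultValue(3);

    public static void main(String[] args) {
        System.out.println(RETRY_COUNT.key() + " defaults to " + RETRY_COUNT.defaultValue());
    }
}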
flink_StreamExecutionEnvironment_addDefaultKryoSerializer | /**
* Adds a new Kryo default serializer to the Runtime.
*
* @param type The class of the types serialized with the given serializer.
* @param serializerClass The class of the serializer to use.
*/
public void addDefaultKryoSerializer(
Class<?> type, Class<? extends Serializer<?>> serializerClass) {
config.addDefaultKryoSerializer(type, serializerClass);
} | 3.68 |
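A sketch of registering a default Kryo serializer for a type Flink cannot treat as a POJO; LegacyEvent and its serializer are made up for the example, and the Kryo 2.x Serializer API bundled by older Flink versions is assumed:

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KryoRegistrationDemo {
    // Hypothetical type that is not a Flink POJO (final field, no default constructor).
    public static class LegacyEvent {
        public final String id;
        public LegacyEvent(String id) { this.id = id; }
    }

    // Custom Kryo serializer; must have a zero-argument constructor to be registered by class.
    public static class LegacyEventSerializer extends Serializer<LegacyEvent> {
        @Override
        public void write(Kryo kryo, Output output, LegacyEvent event) {
            output.writeString(event.id);
        }
        @Override
        public LegacyEvent read(Kryo kryo, Input input, Class<LegacyEvent> type) {
            return new LegacyEvent(input.readString());
        }
    }

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.addDefaultKryoSerializer(LegacyEvent.class, LegacyEventSerializer.class);
    }
}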
morf_XmlDataSetProducer_hasNext | /**
* @see java.util.Iterator#hasNext()
*/
@Override
public boolean hasNext() {
return XmlDataSetNode.RECORD_NODE.equals(currentTagName);
} | 3.68 |
framework_ReflectTools_findCommonBaseType | /**
* Finds the most specific class that both provided classes extend from.
*
* @param a
* one class to get the base type for, not <code>null</code>
* @param b
* another class to get the base type for, not <code>null</code>
* @return the most specific base class, not <code>null</code>
*
* @since 8.0
*/
public static Class<?> findCommonBaseType(Class<?> a, Class<?> b) {
if (a.isInterface()) {
throw new IllegalArgumentException("a cannot be an interface");
}
if (b.isInterface()) {
throw new IllegalArgumentException("b cannot be an interface");
}
if (a.isAssignableFrom(b)) {
return a;
} else if (b.isAssignableFrom(a)) {
return b;
}
Class<?> currentClass = a;
while (!currentClass.isAssignableFrom(b)) {
currentClass = currentClass.getSuperclass();
}
return currentClass;
} | 3.68 |
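A few representative calls, assuming Vaadin's com.vaadin.util.ReflectTools; the expected results follow directly from the walk described above:

import com.vaadin.util.ReflectTools;

public class CommonBaseTypeDemo {
    public static void main(String[] args) {
        // Both arguments extend Number, so the walk up from Integer stops there.
        System.out.println(ReflectTools.findCommonBaseType(Integer.class, Double.class)); // class java.lang.Number
        // One argument is a superclass of the other: that superclass is returned directly.
        System.out.println(ReflectTools.findCommonBaseType(Number.class, Integer.class)); // class java.lang.Number
        // Unrelated classes fall back to Object.
        System.out.println(ReflectTools.findCommonBaseType(String.class, Thread.class));  // class java.lang.Object
    }
}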
flink_RpcEndpoint_getEndpointId | /**
* Returns the rpc endpoint's identifier.
*
* @return Rpc endpoint's identifier.
*/
public String getEndpointId() {
return endpointId;
} | 3.68 |
framework_LayoutDependencyTree_setNeedsVerticalMeasure | /**
* @param connectorId
* the connector id of the component whose vertical size might
* have changed
* @param needsMeasure
* {@code true} if measuring should be enabled, {@code false} if
* measuring should be disabled (disabling is only effective if
* there are no blockers)
*
* @deprecated Use
* {@link #setNeedsVerticalMeasure(ComponentConnector, boolean)}
* for improved performance.
*/
@Deprecated
public void setNeedsVerticalMeasure(String connectorId,
boolean needsMeasure) {
// Ensure connector exists
ComponentConnector connector = (ComponentConnector) ConnectorMap
.get(connection).getConnector(connectorId);
if (connector == null) {
return;
}
setNeedsVerticalMeasure(connector, needsMeasure);
} | 3.68 |
framework_DetailsManagerConnector_triggerDelayedRepositioning | /**
     * Triggers repositioning of the contents from the first affected row
* downwards if any of the rows fall within the visual range. If any other
* delayed repositioning has been triggered within this round trip the
* affected range is expanded as needed. The processing is delayed to make
* sure all updates have time to get in, otherwise the repositioning will be
* calculated separately for each details row addition or removal from the
* server side (see
* {@link DataCommunicatorClientRpc#updateData(elemental.json.JsonArray)}
* implementation within {@link DataCommunicatorConnector}).
*
* @param firstRowIndex
* the index of the first changed row
* @param numberOfRows
* the number of changed rows
*/
private void triggerDelayedRepositioning(int firstRowIndex,
int numberOfRows) {
if (delayedRepositioningStart == null
|| delayedRepositioningStart > firstRowIndex) {
delayedRepositioningStart = firstRowIndex;
}
if (delayedRepositioningEnd == null
|| delayedRepositioningEnd < firstRowIndex + numberOfRows) {
delayedRepositioningEnd = firstRowIndex + numberOfRows;
}
if (!delayedRepositioningTriggered) {
delayedRepositioningTriggered = true;
Scheduler.get().scheduleFinally(() -> {
// refresh the positions of all affected rows and those
// below them, unless all affected rows are outside of the
// visual range
if (getWidget().getEscalator().getVisibleRowRange()
.intersects(Range.between(delayedRepositioningStart,
delayedRepositioningEnd))) {
getWidget().getEscalator().getBody().updateRowPositions(
delayedRepositioningStart,
getWidget().getEscalator().getBody().getRowCount()
- delayedRepositioningStart);
}
delayedRepositioningTriggered = false;
delayedRepositioningStart = null;
delayedRepositioningEnd = null;
});
}
} | 3.68 |
flink_NFACompiler_extendWithUntilCondition | /**
         * This method extends the given condition with the stop (until) condition if necessary. The
         * until condition is applied only if both of the given conditions are non-null.
*
* @param condition the condition to extend
* @param untilCondition the until condition to join with the given condition
* @param isTakeCondition whether the {@code condition} is for {@code TAKE} edge
* @return condition with AND applied or the original condition
*/
private IterativeCondition<T> extendWithUntilCondition(
IterativeCondition<T> condition,
IterativeCondition<T> untilCondition,
boolean isTakeCondition) {
if (untilCondition != null && condition != null) {
return new RichAndCondition<>(new RichNotCondition<>(untilCondition), condition);
} else if (untilCondition != null && isTakeCondition) {
return new RichNotCondition<>(untilCondition);
}
return condition;
} | 3.68 |
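The user-facing feature this helper backs is a looping pattern with an until() stop condition; a sketch assuming Flink CEP's Pattern and SimpleCondition classes:

import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

public class UntilConditionDemo {
    public static Pattern<String, String> pattern() {
        // Keep taking "data..." events until an "end" event is seen; internally the TAKE edge
        // condition becomes AND(NOT(until), condition), as built above.
        return Pattern.<String>begin("payload")
                .where(new SimpleCondition<String>() {
                    @Override
                    public boolean filter(String value) {
                        return value.startsWith("data");
                    }
                })
                .oneOrMore()
                .until(new SimpleCondition<String>() {
                    @Override
                    public boolean filter(String value) {
                        return value.equals("end");
                    }
                });
    }
}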
flink_KeyedStream_intervalJoin | /**
* Join elements of this {@link KeyedStream} with elements of another {@link KeyedStream} over a
* time interval that can be specified with {@link IntervalJoin#between(Time, Time)}.
*
* @param otherStream The other keyed stream to join this keyed stream with
* @param <T1> Type parameter of elements in the other stream
* @return An instance of {@link IntervalJoin} with this keyed stream and the other keyed stream
*/
@PublicEvolving
public <T1> IntervalJoin<T, T1, KEY> intervalJoin(KeyedStream<T1, KEY> otherStream) {
return new IntervalJoin<>(this, otherStream);
} | 3.68 |
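A compact end-to-end sketch, assuming event-time semantics with timestamps taken from a field of made-up POJO events; the data and time bounds are illustrative only:

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

public class IntervalJoinDemo {
    // Simple POJO events; names and fields are made up for the example.
    public static class Order {
        public String key; public long ts;
        public Order() {}
        public Order(String key, long ts) { this.key = key; this.ts = ts; }
    }
    public static class Shipment {
        public String key; public long ts;
        public Shipment() {}
        public Shipment(String key, long ts) { this.key = key; this.ts = ts; }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Order> orders = env.fromElements(new Order("k", 1_000L))
                .assignTimestampsAndWatermarks(WatermarkStrategy.<Order>forMonotonousTimestamps()
                        .withTimestampAssigner((e, ts) -> e.ts));
        DataStream<Shipment> shipments = env.fromElements(new Shipment("k", 1_500L))
                .assignTimestampsAndWatermarks(WatermarkStrategy.<Shipment>forMonotonousTimestamps()
                        .withTimestampAssigner((e, ts) -> e.ts));

        orders.keyBy(o -> o.key)
                .intervalJoin(shipments.keyBy(s -> s.key))
                .between(Time.seconds(-1), Time.seconds(1)) // shipment within +/- 1s of the order
                .process(new ProcessJoinFunction<Order, Shipment, String>() {
                    @Override
                    public void processElement(Order left, Shipment right, Context ctx, Collector<String> out) {
                        out.collect(left.key + ": order@" + left.ts + " joined shipment@" + right.ts);
                    }
                })
                .print();

        env.execute("interval-join-demo");
    }
}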
rocketmq-connect_JsonSchemaSerializer_serialize | /**
* serialize
*
     * @param topic the topic the record is being written to
     * @param isKey whether the payload being serialized is a record key (true) or a record value (false)
     * @param schema the JSON schema describing the value
     * @param value the value to serialize
     * @return the serialized bytes (a schema id header followed by the JSON payload), or null if the value is null
*/
public byte[] serialize(String topic, boolean isKey, JsonSchema schema, Object value) {
if (value == null) {
return null;
}
String subjectName = TopicNameStrategy.subjectName(topic, isKey);
try {
RegisterSchemaRequest schemaRequest = RegisterSchemaRequest
.builder()
.schemaType(schema.schemaType())
.compatibility(Compatibility.BACKWARD)
.schemaIdl(schema.toString())
.desc(schema.name())
.build();
SchemaResponse schemaResponse = registryClient.autoRegisterOrGetSchema(JsonSchemaData.NAMESPACE, topic, subjectName, schemaRequest, schema);
long schemaId = schemaResponse.getRecordId();
// parse idl
if (StringUtils.isNotEmpty(schemaResponse.getIdl())) {
schema = new JsonSchema(schemaResponse.getIdl());
}
// validate json value
if (converterConfig.validate()) {
JsonSchemaUtils.validate(schema.rawSchema(), value);
}
// serialize value
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write(ByteBuffer.allocate(ID_SIZE).putLong(schemaId).array());
out.write(OBJECT_MAPPER.writeValueAsBytes(value));
byte[] bytes = out.toByteArray();
out.close();
return bytes;
} catch (IOException e) {
throw new SerializationException("Error serializing JSON message", e);
}
} | 3.68 |
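The resulting wire format is an 8-byte big-endian schema id followed by the raw JSON bytes (ID_SIZE is assumed to be Long.BYTES, matching the putLong call above). A small round-trip demo:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class WireFormatDemo {
    public static void main(String[] args) {
        long schemaId = 42L;
        byte[] json = "{\"name\":\"demo\"}".getBytes(StandardCharsets.UTF_8);

        // Encode: schema id header followed by the JSON payload, as in the serializer above.
        byte[] payload = ByteBuffer.allocate(Long.BYTES + json.length)
                .putLong(schemaId)
                .put(json)
                .array();

        // Decode: read the header back, then the remaining bytes are the JSON document.
        ByteBuffer buf = ByteBuffer.wrap(payload);
        long decodedId = buf.getLong();
        byte[] body = new byte[buf.remaining()];
        buf.get(body);
        System.out.println(decodedId + " -> " + new String(body, StandardCharsets.UTF_8));
    }
}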
hbase_MetaTableAccessor_getCatalogFamilyRow | /** Returns the {@link HConstants#CATALOG_FAMILY} row from hbase:meta. */
public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
throws IOException {
Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(ri));
get.addFamily(HConstants.CATALOG_FAMILY);
try (Table t = getMetaHTable(connection)) {
return t.get(get);
}
} | 3.68 |
querydsl_AbstractEvaluatorFactory_createSource | /**
     * @param source the Java source of the expression body to embed in the generated eval method
     * @param projectionType the return type of the generated eval method
     * @param names the parameter names of the eval method
     * @param types the parameter types corresponding to the names
     * @param id the simple name of the generated class
     * @param constants constants exposed as additional eval parameters, keyed by name
     * @return the generated Java source as a string
     * @throws IOException if writing the generated source fails
*/
protected String createSource(String source, ClassType projectionType, String[] names,
Type[] types, String id, Map<String, Object> constants) throws IOException {
// create source
StringWriter writer = new StringWriter();
JavaWriter javaw = new JavaWriter(writer);
SimpleType idType = new SimpleType(id, "", id);
javaw.beginClass(idType, null);
Parameter[] params = new Parameter[names.length + constants.size()];
for (int i = 0; i < names.length; i++) {
params[i] = new Parameter(names[i], types[i]);
}
int i = names.length;
for (Map.Entry<String, Object> entry : constants.entrySet()) {
Type type = new ClassType(TypeCategory.SIMPLE, ClassUtils.normalize(entry.getValue().getClass()));
params[i++] = new Parameter(entry.getKey(), type);
}
javaw.beginStaticMethod(projectionType, "eval", params);
javaw.append(source);
javaw.end();
javaw.end();
return writer.toString();
} | 3.68 |
hbase_Encryption_encrypt | /**
* Encrypt a stream of plaintext given a context and IV
   * @param out stream receiving the ciphertext
   * @param in stream providing the plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv)
throws IOException {
Encryptor e = context.getCipher().getEncryptor();
e.setKey(context.getKey());
e.setIv(iv); // can be null
e.reset();
encrypt(out, in, e);
} | 3.68 |
hudi_BaseHoodieWriteClient_scheduleAndGetRestorePlan | /**
   * Check whether there is a failed restore with the same savepointToRestoreTimestamp. Reusing that commit
   * instead of creating a new one avoids issues with the metadata table.
   */
private Pair<String, Option<HoodieRestorePlan>> scheduleAndGetRestorePlan(final String savepointToRestoreTimestamp, HoodieTable<T, I, K, O> table) throws IOException {
Option<HoodieInstant> failedRestore = table.getRestoreTimeline().filterInflightsAndRequested().lastInstant();
if (failedRestore.isPresent() && savepointToRestoreTimestamp.equals(RestoreUtils.getSavepointToRestoreTimestamp(table, failedRestore.get()))) {
return Pair.of(failedRestore.get().getTimestamp(), Option.of(RestoreUtils.getRestorePlan(table.getMetaClient(), failedRestore.get())));
}
final String restoreInstantTimestamp = createNewInstantTime();
return Pair.of(restoreInstantTimestamp, table.scheduleRestore(context, restoreInstantTimestamp, savepointToRestoreTimestamp));
} | 3.68 |
morf_InsertStatementDefaulter_getFieldDefault | /**
* Gets the default value for the {@code column}. If the column has a default
* value associated with it, this is returned. Otherwise a standard default is
* given.
*
* @param column The column to get the default for.
* @return the default value to use.
*/
private AliasedField getFieldDefault(Column column) {
if (isNullDefaultType(column)) {
return new NullFieldLiteral().as(column.getName());
}
if (StringUtils.isNotEmpty(column.getDefaultValue())) {
switch (column.getType()) {
case STRING:
return new FieldLiteral(column.getDefaultValue()).as(column.getName());
case BOOLEAN:
return new FieldLiteral(Boolean.valueOf(column.getDefaultValue())).as(column.getName());
case BIG_INTEGER:
case INTEGER:
return new FieldLiteral(Integer.valueOf(column.getDefaultValue())).as(column.getName());
case DECIMAL:
return new FieldLiteral(Double.valueOf(column.getDefaultValue())).as(column.getName());
default:
throw new UnsupportedOperationException("Cannot determine the default value for data of type " + column.getType());
}
} else {
switch (column.getType()) {
case STRING:
return new FieldLiteral("").as(column.getName());
case BOOLEAN:
return new FieldLiteral(false).as(column.getName());
case DECIMAL:
case INTEGER:
case BIG_INTEGER:
return new FieldLiteral(0).as(column.getName());
default:
throw new UnsupportedOperationException("Cannot determine the default value for data of type " + column.getType());
}
}
} | 3.68 |
querydsl_ExpressionUtils_orderBy | /**
* Create an expression out of the given order specifiers
*
* @param args order
* @return expression for order
*/
public static Expression<?> orderBy(List<OrderSpecifier<?>> args) {
return operation(Object.class, Ops.ORDER, ConstantImpl.create(args));
} | 3.68 |