name (string · lengths 12–178) | code_snippet (string · lengths 8–36.5k) | score (float64 · 3.26–3.68) |
---|---|---|
hbase_IncrementalTableBackupClient_isActiveWalPath | /**
* Check if a given path belongs to the active WAL directory
* @param p path
* @return true, if yes
*/
protected boolean isActiveWalPath(Path p) {
return !AbstractFSWALProvider.isArchivedLogFile(p);
} | 3.68 |
framework_Notification_getPosition | /**
* Gets the position of the notification message.
*
* @return The position
*/
public Position getPosition() {
return getState(false).position;
} | 3.68 |
graphhopper_WaySegmentParser_setElevationProvider | /**
* @param elevationProvider used to determine the elevation of an OSM node
*/
public Builder setElevationProvider(ElevationProvider elevationProvider) {
waySegmentParser.elevationProvider = elevationProvider;
return this;
} | 3.68 |
pulsar_PersistentSubscription_disconnect | /**
* Disconnect all consumers attached to the dispatcher and close this subscription.
*
* @return CompletableFuture indicating the completion of disconnect operation
*/
@Override
public synchronized CompletableFuture<Void> disconnect() {
if (fenceFuture != null){
return fenceFuture;
}
fenceFuture = new CompletableFuture<>();
// block any further consumers on this subscription
IS_FENCED_UPDATER.set(this, TRUE);
(dispatcher != null ? dispatcher.close() : CompletableFuture.completedFuture(null))
.thenCompose(v -> close()).thenRun(() -> {
log.info("[{}][{}] Successfully disconnected and closed subscription", topicName, subName);
fenceFuture.complete(null);
}).exceptionally(exception -> {
log.error("[{}][{}] Error disconnecting consumers from subscription", topicName, subName,
exception);
fenceFuture.completeExceptionally(exception);
resumeAfterFence();
return null;
});
return fenceFuture;
} | 3.68 |
hbase_ReplicationThrottler_getNextSleepInterval | /**
* Get how long the caller should sleep based on the given size, the current cycle's total
* push size, and the cycle start tick; returns the sleep interval for throttling control.
* @param size is the size of edits to be pushed
* @return sleep interval for throttling control
*/
public long getNextSleepInterval(final int size) {
if (!this.enabled) {
return 0;
}
long sleepTicks = 0;
long now = EnvironmentEdgeManager.currentTime();
// 1. if cyclePushSize exceeds bandwidth, we need to sleep some
// following cycles to amortize, this case can occur when a single push
// exceeds the bandwidth
if ((double) this.cyclePushSize > bandwidth) {
double cycles = Math.ceil((double) this.cyclePushSize / bandwidth);
long shouldTillTo = this.cycleStartTick + (long) (cycles * 100);
if (shouldTillTo > now) {
sleepTicks = shouldTillTo - now;
} else {
// no reset in shipEdits since no sleep, so we need to reset cycleStartTick here!
this.cycleStartTick = now;
}
this.cyclePushSize = 0;
} else {
long nextCycleTick = this.cycleStartTick + 100; // a cycle is 100ms
if (now >= nextCycleTick) {
// 2. switch to next cycle if the current cycle has passed
this.cycleStartTick = now;
this.cyclePushSize = 0;
} else if (this.cyclePushSize > 0 && (double) (this.cyclePushSize + size) >= bandwidth) {
// 3. delay the push to next cycle if exceeds throttling bandwidth.
// enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case
// where a cycle's first push size(currentSize) > bandwidth
sleepTicks = nextCycleTick - now;
this.cyclePushSize = 0;
}
}
return sleepTicks;
} | 3.68 |
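A worked example of the cycle arithmetic in case 1 above may help. The numbers below are illustrative only (they are not HBase defaults); the sketch simply replays the branch where a single push exceeds the per-cycle bandwidth and must be amortized over several 100 ms cycles.

```java
// Illustrative sketch of throttling case 1 above (hypothetical numbers, not HBase defaults).
public class ThrottleMathSketch {
    public static void main(String[] args) {
        double bandwidth = 100 * 1024;          // assumed budget: 100 KB per 100 ms cycle
        long cyclePushSize = 350 * 1024;        // a single 350 KB push in this cycle
        long cycleStartTick = 0;                // cycle began at t = 0 ms
        long now = 40;                          // current time: t = 40 ms

        double cycles = Math.ceil(cyclePushSize / bandwidth);        // 4 cycles to amortize
        long shouldTillTo = cycleStartTick + (long) (cycles * 100);  // sleep until t = 400 ms
        long sleepTicks = shouldTillTo > now ? shouldTillTo - now : 0;
        System.out.println("sleep for " + sleepTicks + " ms");       // prints 360
    }
}
```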
hadoop_Preconditions_checkNotNull | /**
* Preconditions that the specified argument is not {@code null},
* throwing a NPE exception otherwise.
*
* <p>The message of the exception is {@code msgSupplier.get()}.</p>
*
* @param <T> the object type
* @param obj the object to check
* @param msgSupplier the {@link Supplier} whose {@link Supplier#get()} value is used as the
*          exception message if it can be evaluated. Otherwise,
*          the message is {@link #VALIDATE_IS_NOT_NULL_EX_MESSAGE}
* @return the validated object (never {@code null} for method chaining)
* @throws NullPointerException if the object is {@code null}
*/
public static <T> T checkNotNull(final T obj,
final Supplier<String> msgSupplier) {
if (obj == null) {
String msg;
try {
// note that we can get NPE evaluating the message itself;
// but we do not want this to override the actual NPE.
msg = msgSupplier.get();
} catch (Exception e) {
// ideally we want to log the error to capture. This may cause log files
// to bloat. On the other hand, swallowing the exception may hide a bug
// in the caller. Debug level is a good compromise between the two
// concerns.
LOG.debug("Error formatting message", e);
msg = VALIDATE_IS_NOT_NULL_EX_MESSAGE;
}
throw new NullPointerException(msg);
}
return obj;
} | 3.68 |
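A minimal caller sketch follows, based only on the signature shown above. The import assumes Hadoop's own Preconditions class in org.apache.hadoop.util, and the job name is a made-up placeholder; the point is that the Supplier keeps message construction lazy, so the string concatenation only happens when the argument really is null.

```java
import org.apache.hadoop.util.Preconditions;

// Minimal usage sketch (assumed package/import; jobName is a placeholder).
public class CheckNotNullSketch {
    static Object validateConfig(Object config, String jobName) {
        // The lambda is only evaluated if config is null, so the happy path
        // pays no string-building cost.
        return Preconditions.checkNotNull(config,
            () -> "configuration for job " + jobName + " must not be null");
    }

    public static void main(String[] args) {
        System.out.println(validateConfig("some-config", "demo-job")); // returns the value unchanged
    }
}
```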
hbase_WALObserver_postWALRoll | /**
* Called after rolling the current WAL
* @param oldPath the path of the wal that we replaced
* @param newPath the path of the wal we have created and now is the current
*/
default void postWALRoll(ObserverContext<? extends WALCoprocessorEnvironment> ctx, Path oldPath,
Path newPath) throws IOException {
} | 3.68 |
framework_AbstractDateField_setPreventInvalidInput | /**
* Controls whether a value change event is emitted when the user input does
* not pass the integrated range validator.
*
* @param preventInvalidInput
* Set to false to disable the value change event.
*
* @since 8.13
*/
public void setPreventInvalidInput(boolean preventInvalidInput) {
this.preventInvalidInput = preventInvalidInput;
} | 3.68 |
morf_ConnectionResources_setFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming | /**
* Sets the JDBC Fetch Size to use when performing bulk select operations while allowing connection use, intended to replace the default in {@link SqlDialect#fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming()}.
* The default behaviour for this method is interpreted as not setting the value.
* @param fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming the JDBC fetch size to use.
*/
public default void setFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming(Integer fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming){
} | 3.68 |
hadoop_EntityGroupFSTimelineStoreMetrics_getEntitiesReadToSummary | // Getters
MutableCounterLong getEntitiesReadToSummary() {
return entitiesReadToSummary;
} | 3.68 |
hadoop_SaslOutputStream_write | /**
* Writes <code>len</code> bytes from the specified byte array starting at
* offset <code>off</code> to this output stream.
*
* @param inBuf
* the data.
* @param off
* the start offset in the data.
* @param len
* the number of bytes to write.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void write(byte[] inBuf, int off, int len) throws IOException {
if (!useWrap) {
outStream.write(inBuf, off, len);
return;
}
try {
if (saslServer != null) { // using saslServer
saslToken = saslServer.wrap(inBuf, off, len);
} else { // using saslClient
saslToken = saslClient.wrap(inBuf, off, len);
}
} catch (SaslException se) {
try {
disposeSasl();
} catch (SaslException ignored) {
}
throw se;
}
if (saslToken != null) {
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
DataOutputStream dout = new DataOutputStream(byteOut);
dout.writeInt(saslToken.length);
outStream.write(byteOut.toByteArray());
outStream.write(saslToken, 0, saslToken.length);
saslToken = null;
}
} | 3.68 |
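When useWrap is enabled, the method above writes each wrapped SASL token as a 4-byte length prefix followed by the token bytes. The round-trip below is a toy sketch of that framing only, using plain JDK streams rather than the Hadoop classes, to show how a peer would read such a frame back.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

// Toy sketch of the length-prefixed framing: 4-byte big-endian length, then the token bytes.
public class SaslFramingSketch {
    static byte[] frame(byte[] token) throws IOException {
        ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
        DataOutputStream dout = new DataOutputStream(byteOut);
        dout.writeInt(token.length);   // length prefix, as in the write() above
        dout.write(token);             // wrapped payload
        return byteOut.toByteArray();
    }

    static byte[] unframe(InputStream in) throws IOException {
        DataInputStream din = new DataInputStream(in);
        byte[] token = new byte[din.readInt()];
        din.readFully(token);
        return token;
    }

    public static void main(String[] args) throws IOException {
        byte[] framed = frame("hello".getBytes(StandardCharsets.UTF_8));
        byte[] token = unframe(new ByteArrayInputStream(framed));
        System.out.println(new String(token, StandardCharsets.UTF_8)); // hello
    }
}
```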
flink_RawType_getSerializerString | /**
* Returns the serialized {@link TypeSerializerSnapshot} in Base64 encoding of this raw type.
*/
public String getSerializerString() {
if (serializerString == null) {
final DataOutputSerializer outputSerializer = new DataOutputSerializer(128);
try {
TypeSerializerSnapshot.writeVersionedSnapshot(
outputSerializer, serializer.snapshotConfiguration());
serializerString =
EncodingUtils.encodeBytesToBase64(outputSerializer.getCopyOfBuffer());
return serializerString;
} catch (Exception e) {
throw new TableException(
String.format(
"Unable to generate a string representation of the serializer snapshot of '%s' "
+ "describing the class '%s' for the RAW type.",
serializer.getClass().getName(), clazz.toString()),
e);
}
}
return serializerString;
} | 3.68 |
flink_FlinkMatchers_findThrowable | // copied from flink-core to not mess up the dependency design too much, just for a little
// utility method
private static Optional<Throwable> findThrowable(
Throwable throwable, Predicate<Throwable> predicate) {
if (throwable == null || predicate == null) {
return Optional.empty();
}
Throwable t = throwable;
while (t != null) {
if (predicate.test(t)) {
return Optional.of(t);
} else {
t = t.getCause();
}
}
return Optional.empty();
} | 3.68 |
framework_GridSingleSelect_select | /**
* Selects the given item. If another item was already selected, that item
* is deselected.
*
* @param item
* the item to select
*/
public void select(T item) {
model.select(item);
} | 3.68 |
hbase_HBaseCluster_isDistributedCluster | /**
* @return whether we are interacting with a distributed cluster as opposed to an in-process
* mini/local cluster.
*/
public boolean isDistributedCluster() {
return false;
} | 3.68 |
flink_ResourceProfile_setExtendedResource | /**
* Add the given extended resource. The old value with the same resource name will be
* replaced if present.
*/
public Builder setExtendedResource(ExternalResource extendedResource) {
this.extendedResources.put(extendedResource.getName(), extendedResource);
return this;
} | 3.68 |
framework_VRadioButtonGroup_updateItemSelection | /**
* Updates the selected state of a radio button.
*
* @param radioButton
* the radio button to update
* @param value
* {@code true} if selected; {@code false} if not
*/
protected void updateItemSelection(RadioButton radioButton, boolean value) {
radioButton.setValue(value);
radioButton.setStyleName(CLASSNAME_OPTION_SELECTED, value);
} | 3.68 |
MagicPlugin_MapController_getAll | // Public API
@Override
public List<com.elmakers.mine.bukkit.api.maps.URLMap> getAll() {
return new ArrayList<>(idMap.values());
} | 3.68 |
flink_MutableHashTable_hash | /**
* The level parameter is needed so that we can have different hash functions when we
* recursively apply the partitioning, so that the working set eventually fits into memory.
*/
public static int hash(int code, int level) {
final int rotation = level * 11;
code = Integer.rotateLeft(code, rotation);
return MathUtils.jenkinsHash(code);
} | 3.68 |
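To see why the level parameter matters, the standalone sketch below applies the same rotate-then-mix idea with a generic integer finalizer standing in for MathUtils.jenkinsHash (so it is not the Flink implementation itself): the same key lands in different buckets at different recursion levels.

```java
// Standalone sketch of level-dependent hashing (generic mix stands in for jenkinsHash).
public class LevelHashSketch {
    static int hash(int code, int level) {
        int h = Integer.rotateLeft(code, level * 11); // different rotation per level
        h ^= (h >>> 16);                              // simple avalanche mix
        h *= 0x85ebca6b;
        h ^= (h >>> 13);
        return h;
    }

    public static void main(String[] args) {
        int key = 42;
        for (int level = 0; level < 3; level++) {
            int bucket = (hash(key, level) & Integer.MAX_VALUE) % 8;
            System.out.println("level " + level + " -> bucket " + bucket);
        }
    }
}
```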
hbase_JSONMetricUtil_dumpBeanToString | /**
* Returns a subset of mbeans defined by qry. Modeled after DumpRegionServerMetrics#dumpMetrics.
* Example: String qry= "java.lang:type=Memory"
* @throws MalformedObjectNameException if the query has a bad format
* @throws IOException if writing the JSON output fails
* @return String representation of the JSON array.
*/
public static String dumpBeanToString(String qry)
throws MalformedObjectNameException, IOException {
StringWriter sw = new StringWriter(1024 * 100); // Guess this size
try (PrintWriter writer = new PrintWriter(sw)) {
JSONBean dumper = new JSONBean();
try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
jsonBeanWriter.write(mbeanServer, new ObjectName(qry), null, false);
}
}
sw.close();
return sw.toString();
} | 3.68 |
pulsar_LinuxInfoUtils_checkHasNicSpeeds | /**
* Check whether this VM has NIC speed information.
* @return whether the VM has NIC speed
*/
public static boolean checkHasNicSpeeds() {
List<String> physicalNICs = getUsablePhysicalNICs();
if (CollectionUtils.isEmpty(physicalNICs)) {
return false;
}
double totalNicLimit = getTotalNicLimit(physicalNICs, BitRateUnit.Kilobit);
return totalNicLimit > 0;
} | 3.68 |
flink_AfterMatchSkipStrategy_noSkip | /**
* Every possible match will be emitted.
*
* @return the created AfterMatchSkipStrategy
*/
public static NoSkipStrategy noSkip() {
return NoSkipStrategy.INSTANCE;
} | 3.68 |
flink_MetricRegistryImpl_isShutdown | /**
* Returns whether this registry has been shutdown.
*
* @return true, if this registry was shutdown, otherwise false
*/
public boolean isShutdown() {
synchronized (lock) {
return isShutdown;
}
} | 3.68 |
flink_ProjectOperator_projectTuple19 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>
ProjectOperator<
T,
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>
projectTuple19() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>
tType =
new TupleTypeInfo<
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>(fTypes);
return new ProjectOperator<
T,
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
hbase_KeyValue_setKey | /**
* A setter that avoids creating a new object every time a
* KeyOnlyKeyValue is needed.
* @param key Key to set
* @param offset Offset of the Key
* @param length length of the Key
*/
public void setKey(byte[] key, int offset, int length) {
this.bytes = key;
this.offset = offset;
this.length = length;
this.rowLen = Bytes.toShort(this.bytes, this.offset);
} | 3.68 |
framework_ClientSideCriterion_isClientSideVerifiable | /*
* All criteria that extend this must be completely validatable on client
* side.
*
* (non-Javadoc)
*
* @see
* com.vaadin.event.dd.acceptCriteria.AcceptCriterion#isClientSideVerifiable
* ()
*/
@Override
public final boolean isClientSideVerifiable() {
return true;
} | 3.68 |
flink_SlotSharingGroup_setTaskHeapMemoryMB | /** Set the task heap memory for this SlotSharingGroup in MB. */
public Builder setTaskHeapMemoryMB(int taskHeapMemoryMB) {
checkArgument(taskHeapMemoryMB > 0, "The task heap memory should be positive.");
this.taskHeapMemory = MemorySize.ofMebiBytes(taskHeapMemoryMB);
return this;
} | 3.68 |
hadoop_OBSDataBlocks_read | /**
* Read in data.
*
* @param b destination buffer
* @param offset offset within the buffer
* @param length length of bytes to read
* @return read size
* @throws EOFException if the position is negative
* @throws IndexOutOfBoundsException if there isn't space for the amount
* of data requested.
* @throws IllegalArgumentException other arguments are invalid.
*/
public synchronized int read(final byte[] b, final int offset,
final int length)
throws IOException {
Preconditions.checkArgument(length >= 0, "length is negative");
Preconditions.checkArgument(b != null, "Null buffer");
if (b.length - offset < length) {
throw new IndexOutOfBoundsException(
FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
+ ": request length ="
+ length
+ ", with offset ="
+ offset
+ "; buffer capacity ="
+ (b.length - offset));
}
verifyOpen();
if (!hasRemaining()) {
return -1;
}
int toRead = Math.min(length, available());
byteBuffer.get(b, offset, toRead);
return toRead;
} | 3.68 |
dubbo_ExpiringMap_setTimeToLive | /**
* Update the time to live.
*
* @param timeToLive time to live, in seconds
*/
public void setTimeToLive(long timeToLive) {
this.timeToLiveMillis = timeToLive * 1000;
} | 3.68 |
flink_CompactingHashTable_getOverflowSegmentCount | /** @return number of memory segments used in overflow buckets */
private int getOverflowSegmentCount() {
int result = 0;
for (InMemoryPartition<T> p : this.partitions) {
result += p.numOverflowSegments;
}
return result;
} | 3.68 |
hudi_MysqlDebeziumSource_processDataset | /**
* Debezium Kafka Payload has a nested structure (see https://debezium.io/documentation/reference/1.4/connectors/mysql.html).
* This function flattens this nested structure for the Mysql data, and also extracts a subset of Debezium metadata fields.
*
* @param rowDataset Dataset containing Debezium Payloads
* @return New dataset with flattened columns
*/
@Override
protected Dataset<Row> processDataset(Dataset<Row> rowDataset) {
Dataset<Row> flattenedDataset = rowDataset;
if (rowDataset.columns().length > 0) {
// Only flatten for non-empty schemas
Dataset<Row> insertedOrUpdatedData = rowDataset
.selectExpr(
String.format("%s as %s", DebeziumConstants.INCOMING_OP_FIELD, DebeziumConstants.FLATTENED_OP_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_TS_MS_FIELD, DebeziumConstants.UPSTREAM_PROCESSING_TS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_NAME_FIELD, DebeziumConstants.FLATTENED_SHARD_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TS_MS_FIELD, DebeziumConstants.FLATTENED_TS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_FILE_FIELD, DebeziumConstants.FLATTENED_FILE_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_POS_FIELD, DebeziumConstants.FLATTENED_POS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_ROW_FIELD, DebeziumConstants.FLATTENED_ROW_COL_NAME),
String.format("%s.*", DebeziumConstants.INCOMING_AFTER_FIELD)
)
.filter(rowDataset.col(DebeziumConstants.INCOMING_OP_FIELD).notEqual(DebeziumConstants.DELETE_OP));
Dataset<Row> deletedData = rowDataset
.selectExpr(
String.format("%s as %s", DebeziumConstants.INCOMING_OP_FIELD, DebeziumConstants.FLATTENED_OP_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_TS_MS_FIELD, DebeziumConstants.UPSTREAM_PROCESSING_TS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_NAME_FIELD, DebeziumConstants.FLATTENED_SHARD_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TS_MS_FIELD, DebeziumConstants.FLATTENED_TS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_FILE_FIELD, DebeziumConstants.FLATTENED_FILE_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_POS_FIELD, DebeziumConstants.FLATTENED_POS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_ROW_FIELD, DebeziumConstants.FLATTENED_ROW_COL_NAME),
String.format("%s.*", DebeziumConstants.INCOMING_BEFORE_FIELD)
)
.filter(rowDataset.col(DebeziumConstants.INCOMING_OP_FIELD).equalTo(DebeziumConstants.DELETE_OP));
flattenedDataset = insertedOrUpdatedData.union(deletedData);
}
return flattenedDataset.withColumn(DebeziumConstants.ADDED_SEQ_COL_NAME,
callUDF(generateUniqueSeqUdfFn, flattenedDataset.col(DebeziumConstants.FLATTENED_FILE_COL_NAME),
flattenedDataset.col(DebeziumConstants.FLATTENED_POS_COL_NAME)));
} | 3.68 |
hbase_HBaseTestingUtility_createMultiRegionTable | /**
* Create a table with multiple regions.
* @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
} | 3.68 |
hadoop_DataJoinReducerBase_regroup | /**
* This is the function that re-groups values for a key into sub-groups based
* on a secondary key (input tag).
*
* @param arg1 iterator over the values associated with the key
* @return a map from input tag to an iterator over the records carrying that tag
*/
private SortedMap<Object, ResetableIterator> regroup(Object key,
Iterator arg1, Reporter reporter) throws IOException {
this.numOfValues = 0;
SortedMap<Object, ResetableIterator> retv = new TreeMap<Object, ResetableIterator>();
TaggedMapOutput aRecord = null;
while (arg1.hasNext()) {
this.numOfValues += 1;
if (this.numOfValues % 100 == 0) {
reporter.setStatus("key: " + key.toString() + " numOfValues: "
+ this.numOfValues);
}
if (this.numOfValues > this.maxNumOfValuesPerGroup) {
continue;
}
aRecord = ((TaggedMapOutput) arg1.next()).clone(job);
Text tag = aRecord.getTag();
ResetableIterator data = retv.get(tag);
if (data == null) {
data = createResetableIterator();
retv.put(tag, data);
}
data.add(aRecord);
}
if (this.numOfValues > this.largestNumOfValues) {
this.largestNumOfValues = numOfValues;
LOG.info("key: " + key.toString() + " this.largestNumOfValues: "
+ this.largestNumOfValues);
}
return retv;
} | 3.68 |
framework_Table_handleSelectedItems | /**
* Handles selection when the table is in multiselect mode.
*
* @param variables
* The variables
*/
private void handleSelectedItems(Map<String, Object> variables) {
final String[] ka = (String[]) variables.get("selected");
final String[] ranges = (String[]) variables.get("selectedRanges");
Set<Object> renderedButNotSelectedItemIds = getCurrentlyRenderedItemIds();
@SuppressWarnings("unchecked")
HashSet<Object> newValue = new LinkedHashSet<Object>(
(Collection<Object>) getValue());
if (variables.containsKey("clearSelections")) {
// the client side has instructed to swipe all previous selections
newValue.clear();
}
/*
* Then add (possibly some of them back) rows that are currently
* selected on the client side (the ones that the client side is aware
* of).
*/
for (String k : ka) {
// key to id
final Object id = itemIdMapper.get(k);
if (!isNullSelectionAllowed()
&& (id == null || id == getNullSelectionItemId())) {
// skip empty selection if nullselection is not allowed
markAsDirty();
} else if (id != null && containsId(id)) {
newValue.add(id);
renderedButNotSelectedItemIds.remove(id);
}
}
/* Add range items aka shift clicked multiselection areas */
if (ranges != null) {
for (String range : ranges) {
String[] split = range.split("-");
Object startItemId = itemIdMapper.get(split[0]);
int length = Integer.valueOf(split[1]);
LinkedHashSet<Object> itemIdsInRange = getItemIdsInRange(
startItemId, length);
newValue.addAll(itemIdsInRange);
renderedButNotSelectedItemIds.removeAll(itemIdsInRange);
}
}
/*
* finally clear all currently rendered rows (the ones that the client
* side counterpart is aware of) that the client didn't send as selected
*/
newValue.removeAll(renderedButNotSelectedItemIds);
if (!isNullSelectionAllowed() && newValue.isEmpty()) {
// empty selection not allowed, keep old value
markAsDirty();
return;
}
setValue(newValue, true);
} | 3.68 |
framework_GridElement_getBody | /**
* Get the body element.
*
* @return the tbody element
*/
public TestBenchElement getBody() {
return getSubPart("#cell");
} | 3.68 |
shardingsphere-elasticjob_JobScheduleController_shutdown | /**
* Shuts down the scheduler gracefully.
* @param isCleanShutdown whether to wait for running jobs to complete
*/
public synchronized void shutdown(final boolean isCleanShutdown) {
try {
if (!scheduler.isShutdown()) {
scheduler.shutdown(isCleanShutdown);
}
} catch (final SchedulerException ex) {
throw new JobSystemException(ex);
}
} | 3.68 |
shardingsphere-elasticjob_InstanceService_removeInstance | /**
* Remove the job instance.
*/
public void removeInstance() {
jobNodeStorage.removeJobNodeIfExisted(instanceNode.getLocalInstancePath());
} | 3.68 |
hudi_StreamerUtil_partitionExists | /**
* Returns whether the hoodie partition exists under given table path {@code tablePath} and partition path {@code partitionPath}.
*
* @param tablePath Base path of the table.
* @param partitionPath The path of the partition.
* @param hadoopConf The hadoop configuration.
*/
public static boolean partitionExists(String tablePath, String partitionPath, org.apache.hadoop.conf.Configuration hadoopConf) {
// Hadoop FileSystem
FileSystem fs = FSUtils.getFs(tablePath, hadoopConf);
try {
return fs.exists(new Path(tablePath, partitionPath));
} catch (IOException e) {
throw new HoodieException(String.format("Error while checking whether partition exists under table path [%s] and partition path [%s]", tablePath, partitionPath), e);
}
} | 3.68 |
hadoop_SharedKeyCredentials_parseQueryString | /**
* Parses a query string into a one to many hashmap.
*
* @param parseString the string to parse
* @return a HashMap<String, String[]> of the key values.
*/
private static HashMap<String, String[]> parseQueryString(String parseString) throws UnsupportedEncodingException {
final HashMap<String, String[]> retVals = new HashMap<>();
if (parseString == null || parseString.isEmpty()) {
return retVals;
}
// 1. Remove ? if present
final int queryDex = parseString.indexOf(AbfsHttpConstants.QUESTION_MARK);
if (queryDex >= 0 && parseString.length() > 0) {
parseString = parseString.substring(queryDex + 1);
}
// 2. split name value pairs by splitting on the '&' character
final String[] valuePairs = parseString.contains(AbfsHttpConstants.AND_MARK)
? parseString.split(AbfsHttpConstants.AND_MARK)
: parseString.split(AbfsHttpConstants.SEMICOLON);
// 3. for each field value pair parse into appropriate map entries
for (int m = 0; m < valuePairs.length; m++) {
final int equalDex = valuePairs[m].indexOf(AbfsHttpConstants.EQUAL);
if (equalDex < 0 || equalDex == valuePairs[m].length() - 1) {
continue;
}
String key = valuePairs[m].substring(0, equalDex);
String value = valuePairs[m].substring(equalDex + 1);
key = safeDecode(key);
value = safeDecode(value);
// 3.1 add to map
String[] values = retVals.get(key);
if (values == null) {
values = new String[]{value};
if (!value.equals("")) {
retVals.put(key, values);
}
}
}
return retVals;
} | 3.68 |
hibernate-validator_BeanMetaDataManagerImpl_createBeanMetaData | /**
* Creates a {@link org.hibernate.validator.internal.metadata.aggregated.BeanMetaData} containing the meta data from all meta
* data providers for the given type and its hierarchy.
*
* @param <T> The type of interest.
* @param clazz The type's class.
*
* @return A bean meta data object for the given type.
*/
private <T> BeanMetaDataImpl<T> createBeanMetaData(Class<T> clazz) {
BeanMetaDataBuilder<T> builder = BeanMetaDataBuilder.getInstance(
constraintCreationContext, executableHelper, parameterNameProvider,
validationOrderGenerator, clazz, methodValidationConfiguration );
for ( MetaDataProvider provider : metaDataProviders ) {
for ( BeanConfiguration<? super T> beanConfiguration : getBeanConfigurationForHierarchy( provider, clazz ) ) {
builder.add( beanConfiguration );
}
}
return builder.build();
} | 3.68 |
hadoop_AbfsInputStream_toString | /**
* Get the statistics of the stream.
* @return a string value.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(super.toString());
sb.append("AbfsInputStream@(").append(this.hashCode()).append("){");
sb.append("[" + CAPABILITY_SAFE_READAHEAD + "]");
if (streamStatistics != null) {
sb.append(", ").append(streamStatistics);
}
sb.append("}");
return sb.toString();
} | 3.68 |
flink_ErrorInfo_createErrorInfoWithNullableCause | /**
* Instantiates an {@code ErrorInfo} to cover inconsistent behavior due to FLINK-21376.
*
* @param exception The error cause that might be {@code null}.
* @param timestamp The timestamp the error was noticed.
* @return a {@code ErrorInfo} containing a generic {@link FlinkException} in case of a missing
* error cause.
*/
public static ErrorInfo createErrorInfoWithNullableCause(
@Nullable Throwable exception, long timestamp) {
return new ErrorInfo(handleMissingThrowable(exception), timestamp);
} | 3.68 |
flink_MemorySegmentFactory_wrapOffHeapMemory | /**
* Creates a memory segment that wraps the off-heap memory backing the given ByteBuffer. Note
* that the ByteBuffer needs to be a <i>direct ByteBuffer</i>.
*
* <p>This method is intended to be used for components which pool memory and create memory
* segments around long-lived memory regions.
*
* @param memory The byte buffer with the off-heap memory to be represented by the memory
* segment.
* @return A new memory segment representing the given off-heap memory.
*/
public static MemorySegment wrapOffHeapMemory(ByteBuffer memory) {
return new MemorySegment(memory, null);
} | 3.68 |
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_interceptSource | /**
* Source won't be changed in the interceptor.
*
* @return source param string passed in.
*/
@Override
public String interceptSource(String source) {
return source;
} | 3.68 |
rocketmq-connect_MetricsReporter_onTimerAdded | /**
* Called when a {@link Timer} is added to the registry.
*
* @param name the timer's name
* @param timer the timer
*/
public void onTimerAdded(String name, Timer timer) {
this.onTimerAdded(MetricUtils.stringToMetricName(name), timer);
} | 3.68 |
hudi_HoodieTable_validateSchema | /**
* Ensure that the current writerSchema is compatible with the latest schema of this dataset.
*
* When inserting/updating data, we read records using the last used schema and convert them to the
* GenericRecords with writerSchema. Hence, we need to ensure that this conversion can take place without errors.
*/
private void validateSchema() throws HoodieUpsertException, HoodieInsertException {
boolean shouldValidate = config.shouldValidateAvroSchema();
boolean allowProjection = config.shouldAllowAutoEvolutionColumnDrop();
if ((!shouldValidate && allowProjection)
|| getActiveTimeline().getCommitsTimeline().filterCompletedInstants().empty()
|| StringUtils.isNullOrEmpty(config.getSchema())
) {
// Check not required
return;
}
try {
TableSchemaResolver schemaResolver = new TableSchemaResolver(getMetaClient());
Option<Schema> existingTableSchema = schemaResolver.getTableAvroSchemaIfPresent(false);
if (!existingTableSchema.isPresent()) {
return;
}
Schema writerSchema = HoodieAvroUtils.createHoodieWriteSchema(config.getSchema());
Schema tableSchema = HoodieAvroUtils.createHoodieWriteSchema(existingTableSchema.get());
AvroSchemaUtils.checkSchemaCompatible(tableSchema, writerSchema, shouldValidate, allowProjection, getDropPartitionColNames());
} catch (Exception e) {
throw new HoodieException("Failed to read schema/check compatibility for base path " + metaClient.getBasePath(), e);
}
} | 3.68 |
morf_GraphBasedUpgradeNode_requiresExclusiveExecution | /**
* @return true if this node should be executed in an exclusive way (no other
* node should be executed while this one is being processed)
*/
public boolean requiresExclusiveExecution() {
return exclusiveExecution || reads.isEmpty() && modifies.isEmpty();
} | 3.68 |
framework_VCalendarPanel_selectFocused | /**
* Updates year, month, day from focusedDate to value
*/
private void selectFocused() {
if (focusedDate != null && isDateInsideRange(focusedDate, resolution)) {
if (value == null) {
// No previously selected value (set to null on server side).
// Create a new date using current date and time
value = new Date();
}
/*
* #5594 set Date (day) to 1 in order to prevent any kind of
* wrapping of months when later setting the month. (e.g. 31 ->
* month with 30 days -> wraps to the 1st of the following month,
* e.g. 31st of May -> 31st of April = 1st of May)
*/
value.setDate(1);
if (value.getYear() != focusedDate.getYear()) {
value.setYear(focusedDate.getYear());
}
if (value.getMonth() != focusedDate.getMonth()) {
value.setMonth(focusedDate.getMonth());
}
if (value.getDate() != focusedDate.getDate()) {
}
// We always need to set the date, even if it hasn't changed, since
// it was forced to 1 above.
value.setDate(focusedDate.getDate());
selectDate(focusedDate);
} else {
getLogger().info("Trying to select a the focused date which is NULL!");
}
} | 3.68 |
framework_AbstractSelect_setNewItemsAllowed | /**
* Enables or disables the possibility for the user to add new options.
*
* @param allowNewOptions
*            the new value of property allowNewOptions.
*/
public void setNewItemsAllowed(boolean allowNewOptions) {
// Only handle change requests
if (this.allowNewOptions != allowNewOptions) {
this.allowNewOptions = allowNewOptions;
markAsDirty();
}
} | 3.68 |
hudi_HoodieIndexUtils_getLatestBaseFilesForPartition | /**
* Fetches the latest {@link HoodieBaseFile}s for the partition of interest.
*
* @param partition Partition of interest
* @param hoodieTable Instance of {@link HoodieTable} of interest
* @return the list of {@link HoodieBaseFile}
*/
public static List<HoodieBaseFile> getLatestBaseFilesForPartition(String partition,
HoodieTable hoodieTable) {
Option<HoodieInstant> latestCommitTime = hoodieTable.getMetaClient().getCommitsTimeline()
.filterCompletedInstants().lastInstant();
if (latestCommitTime.isPresent()) {
return hoodieTable.getBaseFileOnlyView()
.getLatestBaseFilesBeforeOrOn(partition, latestCommitTime.get().getTimestamp())
.collect(toList());
}
return Collections.emptyList();
} | 3.68 |
hudi_MarkerUtils_doesMarkerTypeFileExist | /**
* @param fileSystem file system to use.
* @param markerDir marker directory.
* @return {@code true} if the MARKERS.type file exists; {@code false} otherwise.
*/
public static boolean doesMarkerTypeFileExist(FileSystem fileSystem, String markerDir) throws IOException {
return fileSystem.exists(new Path(markerDir, MARKER_TYPE_FILENAME));
} | 3.68 |
flink_WindowReader_evictor | /** Reads from a window that uses an evictor. */
public EvictingWindowReader<W> evictor() {
return new EvictingWindowReader<>(env, metadata, stateBackend, windowSerializer);
} | 3.68 |
hbase_CompactingMemStore_stopReplayingFromWAL | /**
* This message intends to inform the MemStore that the replaying edits from WAL are done
*/
@Override
public void stopReplayingFromWAL() {
inWalReplay = false;
} | 3.68 |
hadoop_BinaryPartitioner_getPartition | /**
* Use (the specified slice of the array returned by)
* {@link BinaryComparable#getBytes()} to partition.
*/
@Override
public int getPartition(BinaryComparable key, V value, int numPartitions) {
int length = key.getLength();
int leftIndex = (leftOffset + length) % length;
int rightIndex = (rightOffset + length) % length;
int hash = WritableComparator.hashBytes(key.getBytes(),
leftIndex, rightIndex - leftIndex + 1);
return (hash & Integer.MAX_VALUE) % numPartitions;
} | 3.68 |
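The modular arithmetic above is what lets the configured offsets be negative (counting from the end of the key). The sketch below isolates just that normalization, with made-up offset values.

```java
// Sketch of the offset normalization above: negative offsets count from the end
// of the key, so leftOffset = 0, rightOffset = -1 covers the whole key (made-up values).
public class OffsetNormalizationSketch {
    public static void main(String[] args) {
        int length = 10;                                   // key length in bytes
        int leftOffset = 0, rightOffset = -1;              // configured slice
        int leftIndex = (leftOffset + length) % length;    // 0
        int rightIndex = (rightOffset + length) % length;  // 9
        System.out.println("hash bytes [" + leftIndex + ".." + rightIndex + "], "
            + (rightIndex - leftIndex + 1) + " bytes");    // 10 bytes
    }
}
```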
framework_FieldGroup_commit | /**
* Commits all changes done to the bound fields.
* <p>
* Calls all {@link CommitHandler}s before and after committing the field
* changes to the item data source. The whole commit is aborted and state is
* restored to what it was before commit was called if any
* {@link CommitHandler} throws a CommitException or there is a problem
* committing the fields
*
* @throws CommitException
* If the commit was aborted
*/
public void commit() throws CommitException {
if (!isBuffered()) {
// Not using buffered mode, nothing to do
return;
}
startTransactions();
try {
firePreCommitEvent();
Map<Field<?>, InvalidValueException> invalidValueExceptions = commitFields();
if (invalidValueExceptions.isEmpty()) {
firePostCommitEvent();
commitTransactions();
} else {
throw new FieldGroupInvalidValueException(
invalidValueExceptions);
}
} catch (Exception e) {
rollbackTransactions();
throw new CommitException("Commit failed", this, e);
}
} | 3.68 |
hadoop_TimelineEntity_addPrimaryFilters | /**
* Add a map of primary filters to the existing primary filter map
*
* @param primaryFilters
* a map of primary filters
*/
public void addPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
for (Entry<String, Set<Object>> primaryFilter : primaryFilters.entrySet()) {
Set<Object> thisPrimaryFilter =
this.primaryFilters.get(primaryFilter.getKey());
if (thisPrimaryFilter == null) {
this.primaryFilters.put(
primaryFilter.getKey(), primaryFilter.getValue());
} else {
thisPrimaryFilter.addAll(primaryFilter.getValue());
}
}
} | 3.68 |
cron-utils_CronFieldName_getOrder | /**
* Returns the order number that corresponds to the field.
*
* @return order number - int
*/
public int getOrder() {
return order;
} | 3.68 |
hudi_SparkValidatorUtils_getRecordsFromPendingCommits | /**
* Gets records from the modified partitions, including any inflight commits.
* Note that this only works for COW tables
*/
public static Dataset<Row> getRecordsFromPendingCommits(SQLContext sqlContext,
Set<String> partitionsAffected,
HoodieWriteMetadata<HoodieData<WriteStatus>> writeMetadata,
HoodieTable table,
String instantTime) {
// build file system view with pending commits
HoodieTablePreCommitFileSystemView fsView = new HoodieTablePreCommitFileSystemView(table.getMetaClient(),
table.getHoodieView(),
writeMetadata.getWriteStats().get(),
writeMetadata.getPartitionToReplaceFileIds(),
instantTime);
List<String> newFiles = partitionsAffected.stream()
.flatMap(partition -> fsView.getLatestBaseFiles(partition).map(BaseFile::getPath))
.collect(Collectors.toList());
if (newFiles.isEmpty()) {
return sqlContext.emptyDataFrame();
}
return readRecordsForBaseFiles(sqlContext, newFiles);
} | 3.68 |
framework_ComputedStyle_getPaddingWidth | /**
* Returns the sum of the left and right padding.
*
* @since 7.5.3
* @return the sum of the left and right padding
*/
public double getPaddingWidth() {
double paddingWidth = getDoubleProperty("paddingLeft");
paddingWidth += getDoubleProperty("paddingRight");
return paddingWidth;
} | 3.68 |
hbase_MiniHBaseCluster_getNumLiveRegionServers | /** Returns Number of live region servers in the cluster currently. */
public int getNumLiveRegionServers() {
return this.hbaseCluster.getLiveRegionServers().size();
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectOrderByNullsFirstDescendingScript | /**
* Tests a select with an "order by" clause with nulls first and descending direction.
*/
@Test
public void testSelectOrderByNullsFirstDescendingScript() {
FieldReference fieldReference = new FieldReference(STRING_FIELD);
SelectStatement stmt = new SelectStatement(fieldReference)
.from(new TableReference(ALTERNATE_TABLE))
.orderBy(fieldReference.desc().nullsFirst());
assertEquals("Select with descending order by", expectedSelectOrderByNullsFirstDesc(), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
querydsl_Projections_fields | /**
* Create a field access based Bean populating projection for the given type and bindings
*
* @param <T> type of projection
* @param type type of the projection
* @param bindings field bindings
* @return factory expression
*/
public static <T> QBean<T> fields(Class<? extends T> type, Map<String, ? extends Expression<?>> bindings) {
return new QBean<T>(type, true, bindings);
} | 3.68 |
pulsar_AuthenticationProviderToken_authenticate | /**
* @param authData Authentication data.
* @return null; see {@link AuthenticationState#authenticateAsync(AuthData)} for an explanation of the null return value
* @throws AuthenticationException
*/
@Override
public AuthData authenticate(AuthData authData) throws AuthenticationException {
String token = new String(authData.getBytes(), UTF_8);
checkExpiration(token);
this.authenticationDataSource = new AuthenticationDataCommand(token, remoteAddress, sslSession);
return null;
} | 3.68 |
hudi_FlinkWriteClients_createWriteClient | /**
* Creates the Flink write client.
*
* <p>This expects to be used by client, set flag {@code loadFsViewStorageConfig} to use
* remote filesystem view storage config, or an in-memory filesystem view storage is used.
*/
@SuppressWarnings("rawtypes")
public static HoodieFlinkWriteClient createWriteClient(Configuration conf, RuntimeContext runtimeContext, boolean loadFsViewStorageConfig) {
HoodieFlinkEngineContext context =
new HoodieFlinkEngineContext(
new SerializableConfiguration(HadoopConfigurations.getHadoopConf(conf)),
new FlinkTaskContextSupplier(runtimeContext));
HoodieWriteConfig writeConfig = getHoodieClientConfig(conf, loadFsViewStorageConfig);
return new HoodieFlinkWriteClient<>(context, writeConfig);
} | 3.68 |
framework_AbstractLayout_readMargin | /**
* Reads margin attributes from a design into a MarginInfo object. This
* helper method should be called from the
* {@link #readDesign(Element, DesignContext) readDesign} method of layouts
* that implement {@link MarginHandler}.
*
* @since 7.5
*
* @param design
* the design from which to read
* @param defMargin
* the default margin state for edges that are not set in the
* design
* @param context
* the DesignContext instance used for parsing the design
* @return the margin info
*/
protected MarginInfo readMargin(Element design, MarginInfo defMargin,
DesignContext context) {
if (design.hasAttr("margin")) {
boolean margin = DesignAttributeHandler.readAttribute("margin",
design.attributes(), boolean.class);
return new MarginInfo(margin);
} else {
boolean left = DesignAttributeHandler.readAttribute("margin-left",
design.attributes(), defMargin.hasLeft(), boolean.class);
boolean right = DesignAttributeHandler.readAttribute("margin-right",
design.attributes(), defMargin.hasRight(), boolean.class);
boolean top = DesignAttributeHandler.readAttribute("margin-top",
design.attributes(), defMargin.hasTop(), boolean.class);
boolean bottom = DesignAttributeHandler.readAttribute(
"margin-bottom", design.attributes(), defMargin.hasBottom(),
boolean.class);
return new MarginInfo(top, right, bottom, left);
}
} | 3.68 |
hudi_KafkaOffsetGen_fetchPartitionInfos | /**
* Fetch partition infos for given topic.
*
* @param consumer
* @param topicName
*/
private List<PartitionInfo> fetchPartitionInfos(KafkaConsumer consumer, String topicName) {
long timeout = getLongWithAltKeys(this.props, KafkaSourceConfig.KAFKA_FETCH_PARTITION_TIME_OUT);
long start = System.currentTimeMillis();
List<PartitionInfo> partitionInfos;
do {
// TODO(HUDI-4625) cleanup, introduce retrying client
partitionInfos = consumer.partitionsFor(topicName);
try {
if (partitionInfos == null) {
TimeUnit.SECONDS.sleep(10);
}
} catch (InterruptedException e) {
LOG.error("Sleep failed while fetching partitions");
}
} while (partitionInfos == null && (System.currentTimeMillis() <= (start + timeout)));
if (partitionInfos == null) {
throw new HoodieStreamerException(String.format("Can not find metadata for topic %s from kafka cluster", topicName));
}
return partitionInfos;
} | 3.68 |
hbase_AbstractStateMachineNamespaceProcedure_createDirectory | /**
* Create the namespace directory
* @param env MasterProcedureEnv
* @param nsDescriptor NamespaceDescriptor
*/
protected static void createDirectory(MasterProcedureEnv env, NamespaceDescriptor nsDescriptor)
throws IOException {
createDirectory(env.getMasterServices().getMasterFileSystem(), nsDescriptor);
} | 3.68 |
hadoop_ValueAggregatorBaseDescriptor_generateEntry | /**
*
* @param type the aggregation type
* @param id the aggregation id
* @param val the val associated with the id to be aggregated
* @return an Entry whose key is the aggregation id prefixed with
* the aggregation type.
*/
public static Entry<Text, Text> generateEntry(String type, String id, Text val) {
return org.apache.hadoop.mapreduce.lib.aggregate.
ValueAggregatorBaseDescriptor.generateEntry(type, id, val);
} | 3.68 |
framework_Criterion_getOperator | /**
* Gets the comparison operator.
*
* @return operator to be used when comparing payload value with criterion
*/
public ComparisonOperator getOperator() {
return operator;
} | 3.68 |
flink_ThrowableClassifier_findThrowableOfThrowableType | /**
* Checks whether a throwable chain contains a specific throwable type and returns the
* corresponding throwable.
*
* @param throwable the throwable chain to check.
* @param throwableType the throwable type to search for in the chain.
* @return Optional throwable of the throwable type if available, otherwise empty
*/
public static Optional<Throwable> findThrowableOfThrowableType(
Throwable throwable, ThrowableType throwableType) {
if (throwable == null || throwableType == null) {
return Optional.empty();
}
Throwable t = throwable;
while (t != null) {
final ThrowableAnnotation annotation =
t.getClass().getAnnotation(ThrowableAnnotation.class);
if (annotation != null && annotation.value() == throwableType) {
return Optional.of(t);
} else {
t = t.getCause();
}
}
return Optional.empty();
} | 3.68 |
pulsar_ManagedLedgerImpl_getPositionAfterN | /**
* Get the entry position at a given distance from a given position.
*
* @param startPosition
* starting position
* @param n
* number of entries to skip ahead
* @param startRange
* specifies whether to include the start position in calculating the distance
* @return the new position that is n entries ahead
*/
public PositionImpl getPositionAfterN(final PositionImpl startPosition, long n, PositionBound startRange) {
long entriesToSkip = n;
long currentLedgerId;
long currentEntryId;
if (startRange == PositionBound.startIncluded) {
currentLedgerId = startPosition.getLedgerId();
currentEntryId = startPosition.getEntryId();
} else {
PositionImpl nextValidPosition = getNextValidPosition(startPosition);
currentLedgerId = nextValidPosition.getLedgerId();
currentEntryId = nextValidPosition.getEntryId();
}
boolean lastLedger = false;
long totalEntriesInCurrentLedger;
while (entriesToSkip >= 0) {
// for the current ledger, the number of entries written is deduced from the lastConfirmedEntry
// for previous ledgers, LedgerInfo in ZK has the number of entries
if (currentLedger != null && currentLedgerId == currentLedger.getId()) {
lastLedger = true;
if (currentLedgerEntries > 0) {
totalEntriesInCurrentLedger = lastConfirmedEntry.getEntryId() + 1;
} else {
totalEntriesInCurrentLedger = 0;
}
} else {
LedgerInfo ledgerInfo = ledgers.get(currentLedgerId);
totalEntriesInCurrentLedger = ledgerInfo != null ? ledgerInfo.getEntries() : 0;
}
long unreadEntriesInCurrentLedger = totalEntriesInCurrentLedger > 0
? totalEntriesInCurrentLedger - currentEntryId : 0;
if (unreadEntriesInCurrentLedger >= entriesToSkip) {
// if the current ledger has more entries than what we need to skip
// then the return position is in the same ledger
currentEntryId += entriesToSkip;
break;
} else {
// skip remaining entry from the next ledger
entriesToSkip -= unreadEntriesInCurrentLedger;
if (lastLedger) {
// there are no more ledgers, return the last position
currentEntryId = totalEntriesInCurrentLedger;
break;
}
Long lid = ledgers.ceilingKey(currentLedgerId + 1);
currentLedgerId = lid != null ? lid : ledgers.lastKey();
currentEntryId = 0;
}
}
PositionImpl positionToReturn = getPreviousPosition(PositionImpl.get(currentLedgerId, currentEntryId));
if (positionToReturn.compareTo(lastConfirmedEntry) > 0) {
positionToReturn = lastConfirmedEntry;
}
if (log.isDebugEnabled()) {
log.debug("getPositionAfterN: Start position {}:{}, startIncluded: {}, Return position {}:{}",
startPosition.getLedgerId(), startPosition.getEntryId(), startRange, positionToReturn.getLedgerId(),
positionToReturn.getEntryId());
}
return positionToReturn;
} | 3.68 |
flink_TestLoggerResource_asSingleTestResource | /** Enables the use of {@link TestLoggerResource} for try-with-resources statement. */
public static SingleTestResource asSingleTestResource(
String loggerName, org.slf4j.event.Level level) throws Throwable {
return new SingleTestResource(loggerName, level);
} | 3.68 |
hadoop_YarnServerSecurityUtils_selectAMRMTokenIdentifier | // Obtain the needed AMRMTokenIdentifier from the remote-UGI. RPC layer
// currently sets only the required id, but iterate through anyways just to be
// sure.
private static AMRMTokenIdentifier selectAMRMTokenIdentifier(
UserGroupInformation remoteUgi) throws IOException {
AMRMTokenIdentifier result = null;
Set<TokenIdentifier> tokenIds = remoteUgi.getTokenIdentifiers();
for (TokenIdentifier tokenId : tokenIds) {
if (tokenId instanceof AMRMTokenIdentifier) {
result = (AMRMTokenIdentifier) tokenId;
break;
}
}
return result;
} | 3.68 |
MagicPlugin_Targeting_findTarget | /**
* Returns the block at the cursor, or null if out of range
*
* @return The target block
*/
protected Target findTarget(MageContext context, double range)
{
if (targetType == TargetType.NONE) {
return new Target(source);
}
boolean isBlock = targetType == TargetType.BLOCK || targetType == TargetType.SELECT;
Mage mage = context.getMage();
final Entity mageEntity = mage.getEntity();
if (targetType == TargetType.SELF && mageEntity != null) {
result = TargetingResult.ENTITY;
return new Target(source, mageEntity);
}
CommandSender sender = mage.getCommandSender();
if (targetType == TargetType.SELF && mageEntity == null && sender != null && (sender instanceof BlockCommandSender)) {
BlockCommandSender commandBlock = (BlockCommandSender)mage.getCommandSender();
return new Target(commandBlock.getBlock().getLocation(), commandBlock.getBlock());
}
if (targetType == TargetType.SELF && source != null) {
return new Target(source, source.getBlock());
}
if (targetType == TargetType.SELF) {
return new Target(source);
}
Block block = null;
if (!ignoreBlocks) {
findTargetBlock(context, range);
block = currentBlock;
}
Target targetBlock = null;
if (block != null || isBlock) {
if (result == TargetingResult.BLOCK) {
targetBlock = new Target(source, block, useHitbox, hitboxBlockPadding);
} else {
Vector direction = source.getDirection();
Location targetLocation = source.clone().add(direction.multiply(range));
targetBlock = new Target(source, targetLocation, useHitbox, hitboxBlockPadding);
}
}
if (isBlock) {
return targetBlock;
}
// Don't target entities beyond the block we just hit,
// but only if that block was solid, and not just at max range
if (targetBlock != null && source != null && source.getWorld().equals(block.getWorld()) && !result.isMiss()) {
range = Math.min(range, source.distance(targetBlock.getLocation()));
}
// Pick the closest candidate entity
Target entityTarget = null;
List<Target> scored = getAllTargetEntities(context, range);
if (scored.size() > 0) {
entityTarget = scored.get(0);
}
// Don't allow targeting entities in an area you couldn't cast the spell in
if (context instanceof CastContext) {
CastContext castContext = (CastContext)context;
if (entityTarget != null && !castContext.canCast(entityTarget.getLocation())) {
entityTarget = null;
}
if (targetBlock != null && !castContext.canCast(targetBlock.getLocation())) {
result = TargetingResult.MISS;
targetBlock = null;
}
}
if (targetType == TargetType.OTHER_ENTITY && entityTarget == null) {
result = TargetingResult.MISS;
return new Target(source);
}
if (targetType == TargetType.ANY_ENTITY && entityTarget == null) {
result = TargetingResult.ENTITY;
return new Target(source, mageEntity);
}
if (entityTarget == null && targetType == TargetType.ANY && mageEntity != null) {
result = TargetingResult.ENTITY;
return new Target(source, mageEntity, targetBlock == null ? null : targetBlock.getBlock());
}
if (targetBlock != null && entityTarget != null) {
if (targetBlock.getDistanceSquared() < entityTarget.getDistanceSquared() - hitboxPadding * hitboxPadding && !result.isMiss()) {
entityTarget = null;
} else {
targetBlock = null;
}
}
if (entityTarget != null) {
result = TargetingResult.ENTITY;
return entityTarget;
} else if (targetBlock != null) {
return targetBlock;
}
result = TargetingResult.MISS;
return new Target(source);
} | 3.68 |
hbase_MobUtils_formatDate | /**
* Formats a date to a string.
* @param date The date.
* @return The string format of the date; the format is yyyymmdd.
*/
public static String formatDate(Date date) {
return LOCAL_FORMAT.get().format(date);
} | 3.68 |
MagicPlugin_MageConversation_sayNextLine | /**
* Returns true when finished
*/
public boolean sayNextLine(List<String> dialog) {
Player target = targetPlayer.get();
if (target == null || nextLine >= dialog.size()) {
return true;
}
String configuredLines = dialog.get(nextLine);
if (!configuredLines.isEmpty()) {
String[] lines = configuredLines.split("\n");
for (String line : lines) {
String message = formatString.replace("$line", line);
message = message.replace("$speaker", speaker.getDisplayName())
.replace("$target", target.getDisplayName());
target.sendMessage(CompatibilityLib.getCompatibilityUtils().translateColors(message));
}
}
nextLine++;
return nextLine >= dialog.size();
} | 3.68 |
flink_EnvironmentSettings_getUserClassLoader | /**
* Returns the user {@link ClassLoader} to use for code generation, UDF loading and other
* operations requiring reflections on user code.
*/
@Internal
public ClassLoader getUserClassLoader() {
return classLoader;
} | 3.68 |
morf_SqlDialect_getUpdateStatementAssignmentsSql | /**
* Returns the assignments for the SET clause of an SQL UPDATE statement
* based on the {@link List} of {@link AliasedField}s provided.
*
* @param fields The {@link List} of {@link AliasedField}s to create the assignments from
* @return the assignments for the SET clause as a string
*/
protected String getUpdateStatementAssignmentsSql(Iterable<AliasedField> fields) {
Iterable<String> setStatements = Iterables.transform(fields, field -> field.getAlias() + " = " + getSqlFrom(field));
return Joiner.on(", ").join(setStatements);
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getInputPaths | /**
* MOD - Just added this for visibility.
*/
Path[] getInputPaths(JobConf job) throws IOException {
Path[] dirs = FileInputFormat.getInputPaths(job);
if (dirs.length == 0) {
// on tez we're avoiding to duplicate the file info in FileInputFormat.
if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
try {
List<Path> paths = Utilities.getInputPathsTez(job, mrwork);
dirs = paths.toArray(new Path[paths.size()]);
} catch (Exception e) {
throw new IOException("Could not create input files", e);
}
} else {
throw new IOException("No input paths specified in job");
}
}
return dirs;
} | 3.68 |
morf_DummyXmlOutputStreamProvider_clearDestination | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider#clearDestination()
*/
@Override
public void clearDestination() {
cleared = true;
} | 3.68 |
flink_HiveTablePartition_ofPartition | /**
* Creates a HiveTablePartition to represent a hive partition.
*
* @param hiveConf the HiveConf used to connect to HMS
* @param hiveVersion the version of hive in use, if it's null the version will be automatically
* detected
* @param dbName name of the database
* @param tableName name of the table
* @param partitionSpec map from each partition column to its value. The map should contain
* exactly all the partition columns and in the order in which the partition columns are
* defined
*/
public static HiveTablePartition ofPartition(
HiveConf hiveConf,
@Nullable String hiveVersion,
String dbName,
String tableName,
LinkedHashMap<String, String> partitionSpec) {
HiveShim hiveShim = getHiveShim(hiveVersion);
try (HiveMetastoreClientWrapper client =
new HiveMetastoreClientWrapper(hiveConf, hiveShim)) {
Table hiveTable = client.getTable(dbName, tableName);
Partition hivePartition =
client.getPartition(dbName, tableName, new ArrayList<>(partitionSpec.values()));
return new HiveTablePartition(
hivePartition.getSd(),
partitionSpec,
HiveReflectionUtils.getTableMetadata(hiveShim, hiveTable));
} catch (TException e) {
throw new FlinkHiveException(
String.format(
"Failed to create HiveTablePartition for partition %s of hive table %s.%s",
partitionSpec, dbName, tableName),
e);
}
} | 3.68 |
flink_HiveParserDefaultGraphWalker_walk | // walk the current operator and its descendants.
protected void walk(Node nd) throws SemanticException {
// Push the node in the stack
opStack.push(nd);
// While there are still nodes to dispatch...
while (!opStack.empty()) {
Node node = opStack.peek();
if (node.getChildren() == null || getDispatchedList().containsAll(node.getChildren())) {
// Dispatch current node
if (!getDispatchedList().contains(node)) {
dispatch(node, opStack);
opQueue.add(node);
}
opStack.pop();
continue;
}
// Add a single child and restart the loop
for (Node childNode : node.getChildren()) {
if (!getDispatchedList().contains(childNode)) {
opStack.push(childNode);
break;
}
}
} // end while
} | 3.68 |
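The loop above is an explicit-stack post-order traversal: a node is dispatched only once all of its children have been dispatched. Below is a self-contained sketch of the same pattern on a toy tree (not the Hive parser's Node type), where "dispatch" is just a print.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy re-creation of the stack-based post-order dispatch pattern used by the walker above.
public class PostOrderWalkSketch {
    record Node(String name, List<Node> children) {}

    static void walk(Node root) {
        Deque<Node> stack = new ArrayDeque<>();
        Set<Node> dispatched = new HashSet<>();
        stack.push(root);
        while (!stack.isEmpty()) {
            Node node = stack.peek();
            if (node.children().isEmpty() || dispatched.containsAll(node.children())) {
                if (dispatched.add(node)) {
                    System.out.println("dispatch " + node.name()); // children already handled
                }
                stack.pop();
                continue;
            }
            for (Node child : node.children()) {   // push one pending child, then re-loop
                if (!dispatched.contains(child)) {
                    stack.push(child);
                    break;
                }
            }
        }
    }

    public static void main(String[] args) {
        Node leafA = new Node("a", List.of());
        Node leafB = new Node("b", List.of());
        walk(new Node("root", List.of(leafA, leafB))); // prints a, b, root
    }
}
```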
framework_ComputedStyle_getBorderHeight | /**
* Returns the sum of the top and bottom border width.
*
* @since 7.5.3
* @return the sum of the top and bottom border
*/
public double getBorderHeight() {
double borderHeight = getDoubleProperty("borderTopWidth");
borderHeight += getDoubleProperty("borderBottomWidth");
return borderHeight;
} | 3.68 |
hadoop_AllocateResponse_numClusterNodes | /**
* Set the <code>numClusterNodes</code> of the response.
* @see AllocateResponse#setNumClusterNodes(int)
* @param numClusterNodes <code>numClusterNodes</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder numClusterNodes(int numClusterNodes) {
allocateResponse.setNumClusterNodes(numClusterNodes);
return this;
} | 3.68 |
framework_VCalendar_getRangeSelectListener | /**
* Get the listener that listens to the user highlighting a region in the
* calendar.
*
* @return the range select listener
*/
public RangeSelectListener getRangeSelectListener() {
return rangeSelectListener;
} | 3.68 |
hadoop_DeletedDirTracker_isContained | /**
* Is a path directly contained in the set of deleted directories.
* @param dir directory to probe
* @return true if this directory is recorded as being deleted.
*/
boolean isContained(Path dir) {
return directories.getIfPresent(dir) != null;
} | 3.68 |
framework_Slot_onBrowserEvent | /*
* (non-Javadoc)
*
* @see com.google.gwt.user.client.ui.Widget#onBrowserEvent(com.google.gwt
* .user.client.Event)
*/
@Override
public void onBrowserEvent(Event event) {
super.onBrowserEvent(event);
if (DOM.eventGetType(event) == Event.ONLOAD && icon != null
&& icon.getElement() == DOM.eventGetTarget(event)) {
if (layout.getLayoutManager() != null) {
layout.getLayoutManager().layoutLater();
} else {
layout.updateCaptionOffset(caption);
}
}
} | 3.68 |
hudi_HoodieMetaserver_getMetaserverStorage | // only for test
public static MetaserverStorage getMetaserverStorage() {
return metaserverStorage;
} | 3.68 |
hbase_Scan_numFamilies | /** Returns the number of families in familyMap */
public int numFamilies() {
if (hasFamilies()) {
return this.familyMap.size();
}
return 0;
} | 3.68 |
streampipes_JdbcClient_save | /**
* Prepares a statement for the insertion of values.
*
* @param event The event which should be saved to the Postgres table
* @throws SpRuntimeException When there was an error in the saving process
*/
protected void save(final Event event) throws SpRuntimeException {
//TODO: Add batch support (https://stackoverflow.com/questions/3784197/efficient-way-to-do-batch-inserts-with-jdbc)
checkConnected();
    if (event == null) {
      throw new SpRuntimeException("event is null");
    }
    Map<String, Object> eventMap = event.getRaw();
if (!this.tableDescription.tableExists()) {
// Creates the table
createTable();
this.tableDescription.setTableExists();
}
try {
checkConnected();
this.statementHandler.executePreparedStatement(
this.dbDescription, this.tableDescription,
connection, eventMap);
} catch (SQLException e) {
if (e.getSQLState().substring(0, 2).equals("42")) {
// If the table does not exists (because it got deleted or something, will cause the error
// code "42") we will try to create a new one. Otherwise we do not handle the exception.
LOG.warn("Table '" + this.tableDescription.getName() + "' was unexpectedly not found and gets recreated.");
this.tableDescription.setTableMissing();
createTable();
this.tableDescription.setTableExists();
try {
checkConnected();
this.statementHandler.executePreparedStatement(
this.dbDescription, this.tableDescription,
connection, eventMap);
} catch (SQLException e1) {
throw new SpRuntimeException(e1.getMessage());
}
} else {
throw new SpRuntimeException(e.getMessage());
}
}
} | 3.68 |
querydsl_MathExpressions_atan | /**
* Create a {@code atan(num)} expression
*
* <p>Returns the principal value of the arc tangent of num, expressed in radians.</p>
*
* @param num numeric expression
* @return atan(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> atan(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.ATAN, num);
} | 3.68 |
hbase_KeyValue_matchingRows | /**
* Compare rows. Just calls Bytes.equals, but it's good to have this encapsulated.
* @param left Left row array.
* @param loffset Left row offset.
* @param llength Left row length.
* @param right Right row array.
* @param roffset Right row offset.
* @param rlength Right row length.
* @return Whether rows are the same row.
*/
public boolean matchingRows(final byte[] left, final int loffset, final int llength,
final byte[] right, final int roffset, final int rlength) {
return Bytes.equals(left, loffset, llength, right, roffset, rlength);
} | 3.68 |
pulsar_WebSocketWebResource_isAuthorized | /**
* Checks if user is authorized to produce/consume on a given topic.
*
* @param topic
* @return
* @throws Exception
*/
protected boolean isAuthorized(TopicName topic) throws Exception {
if (service().isAuthorizationEnabled()) {
return service().getAuthorizationService().canLookup(topic, clientAppId(), authData());
}
return true;
} | 3.68 |
flink_CsvRowSchemaConverter_convertType | /**
* Convert {@link LogicalType} to {@link CsvSchema.ColumnType} based on Jackson's categories.
*/
private static CsvSchema.ColumnType convertType(String fieldName, LogicalType type) {
if (STRING_TYPE_ROOTS.contains(type.getTypeRoot())) {
return CsvSchema.ColumnType.STRING;
} else if (NUMBER_TYPE_ROOTS.contains(type.getTypeRoot())) {
return CsvSchema.ColumnType.NUMBER;
} else if (BOOLEAN_TYPE_ROOTS.contains(type.getTypeRoot())) {
return CsvSchema.ColumnType.BOOLEAN;
} else if (type.getTypeRoot() == LogicalTypeRoot.ARRAY) {
validateNestedField(fieldName, ((ArrayType) type).getElementType());
return CsvSchema.ColumnType.ARRAY;
} else if (type.getTypeRoot() == LogicalTypeRoot.ROW) {
RowType rowType = (RowType) type;
for (LogicalType fieldType : rowType.getChildren()) {
validateNestedField(fieldName, fieldType);
}
return CsvSchema.ColumnType.ARRAY;
} else {
throw new IllegalArgumentException(
"Unsupported type '"
+ type.asSummaryString()
+ "' for field '"
+ fieldName
+ "'.");
}
} | 3.68 |
morf_ConnectionResourcesBean_getStatementPoolingMaxStatements | /**
* @see org.alfasoftware.morf.jdbc.ConnectionResources#getStatementPoolingMaxStatements()
*/
@Override
public int getStatementPoolingMaxStatements() {
return statementPoolingMaxStatements;
} | 3.68 |
hbase_CacheConfig_shouldCacheCompactedBlocksOnWrite | /** Returns true if blocks should be cached while writing during compaction, false if not */
public boolean shouldCacheCompactedBlocksOnWrite() {
return this.cacheCompactedDataOnWrite;
} | 3.68 |
pulsar_FunctionRuntimeManager_findFunctionAssignments | /**
* Find all instance assignments of function.
* @param tenant
* @param namespace
* @param functionName
* @return
*/
public synchronized Collection<Assignment> findFunctionAssignments(String tenant,
String namespace, String functionName) {
return findFunctionAssignments(tenant, namespace, functionName, this.workerIdToAssignments);
} | 3.68 |
rocketmq-connect_WrapperStatusListener_onStartup | /**
* Invoked after successful startup of the task.
*
* @param id The id of the task
*/
@Override
public void onStartup(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation()));
} | 3.68 |
flink_SlotProfile_getReservedAllocations | /**
* Returns a set of all reserved allocation ids from the execution graph. It will be used by {@link
* PreviousAllocationSlotSelectionStrategy} to support local recovery. In this case, a vertex
* cannot take a reserved allocation unless it exactly prefers that allocation.
*
* <p>This is optional and can be empty if unused.
*/
public Set<AllocationID> getReservedAllocations() {
return reservedAllocations;
} | 3.68 |
hadoop_WriteOperationHelper_completeMPUwithRetries | /**
* This completes a multipart upload to the destination key via
* {@code finalizeMultipartUpload()}.
* Retry policy: retrying, translated.
* Retries increment the {@code errorCount} counter.
* @param destKey destination
* @param uploadId multipart operation Id
* @param partETags list of partial uploads
* @param length length of the upload
* @param errorCount a counter incremented by 1 on every error; for
* use in statistics
* @param putOptions put object options
* @return the result of the operation.
* @throws IOException if problems arose which could not be retried, or
* the retry count was exceeded
*/
@Retries.RetryTranslated
public CompleteMultipartUploadResponse completeMPUwithRetries(
String destKey,
String uploadId,
List<CompletedPart> partETags,
long length,
AtomicInteger errorCount,
PutObjectOptions putOptions)
throws IOException {
checkNotNull(uploadId);
checkNotNull(partETags);
LOG.debug("Completing multipart upload {} with {} parts",
uploadId, partETags.size());
return finalizeMultipartUpload(destKey,
uploadId,
partETags,
length,
putOptions,
(text, e, r, i) -> errorCount.incrementAndGet());
} | 3.68 |