name | code_snippet | score
---|---|---|
morf_SelectStatementBuilder_build | /**
* Builds the select statement.
*/
@Override
public SelectStatement build() {
return new SelectStatement(this);
} | 3.68 |
hmily_HmilySafeNumberOperationUtils_safeContains | /**
 * Executes the range contains method in safe mode.
*
* @param range range
* @param endpoint endpoint
* @return whether the endpoint is included in the range
*/
public static boolean safeContains(final Range<Comparable<?>> range, final Comparable<?> endpoint) {
try {
return range.contains(endpoint);
} catch (final ClassCastException ex) {
Comparable<?> rangeUpperEndpoint = range.hasUpperBound() ? range.upperEndpoint() : null;
Comparable<?> rangeLowerEndpoint = range.hasLowerBound() ? range.lowerEndpoint() : null;
Class<?> clazz = getTargetNumericType(Lists.newArrayList(rangeLowerEndpoint, rangeUpperEndpoint, endpoint));
if (clazz == null) {
throw ex;
}
Range<Comparable<?>> newRange = createTargetNumericTypeRange(range, clazz);
return newRange.contains(parseNumberByClazz(endpoint.toString(), clazz));
}
} | 3.68 |
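A minimal usage sketch (not from the source; it assumes the elided helpers getTargetNumericType, createTargetNumericTypeRange and parseNumberByClazz coerce all endpoints to a common numeric type, and the import for HmilySafeNumberOperationUtils is omitted): checking a Long endpoint against an Integer-typed Guava Range first hits the ClassCastException branch, after which the range is rebuilt over the wider type.

import com.google.common.collect.Range;

public class SafeContainsDemo {
    @SuppressWarnings({"unchecked", "rawtypes"})
    public static void main(String[] args) {
        // Integer-typed range, Long endpoint: a plain contains() would throw ClassCastException.
        Range<Comparable<?>> range = (Range) Range.closed(1, 10);
        boolean inRange = HmilySafeNumberOperationUtils.safeContains(range, 5L);
        System.out.println(inRange); // expected: true after numeric widening
    }
}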
querydsl_BooleanExpression_andAnyOf | /**
* Create a {@code this && any(predicates)} expression
*
* <p>Returns an intersection of this and the union of the given predicates</p>
*
* @param predicates union of predicates
* @return this && any(predicates)
*/
public BooleanExpression andAnyOf(Predicate... predicates) {
return and(ExpressionUtils.anyOf(predicates));
} | 3.68 |
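A short illustration (not from the source) of the documented equivalence using Querydsl's Expressions factory: andAnyOf first unions the given predicates, then intersects the result with this expression.

import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;

public class AndAnyOfDemo {
    public static void main(String[] args) {
        BooleanExpression a = Expressions.booleanPath("a");
        BooleanExpression b = Expressions.booleanPath("b");
        BooleanExpression c = Expressions.booleanPath("c");
        // equivalent to a.and(b.or(c)), i.e. "a && (b || c)"
        BooleanExpression combined = a.andAnyOf(b, c);
        System.out.println(combined);
    }
}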
framework_AbstractClientConnector_equals | /*
* (non-Javadoc)
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
/*
* This equals method must return true when we're comparing an object to
* its proxy. This happens a lot with CDI (and possibly Spring) when
* we're injecting Components. See #14639
*/
if (obj instanceof AbstractClientConnector) {
AbstractClientConnector connector = (AbstractClientConnector) obj;
return connector.isThis(this);
}
return false;
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_chooseSubClusterIdForMaxLoadSC | /**
 * Check if the current target subcluster is over the max load and, if it is,
 * reroute it.
*
* @param targetId the original target subcluster id
* @param maxThreshold the max load threshold to reroute
* @param activeAndEnabledSCs the list of active and enabled subclusters
* @return targetId if it is within maxThreshold, otherwise a new id
*/
private SubClusterId chooseSubClusterIdForMaxLoadSC(SubClusterId targetId,
int maxThreshold, Set<SubClusterId> activeAndEnabledSCs) {
ArrayList<Float> weight = new ArrayList<>();
ArrayList<SubClusterId> scIds = new ArrayList<>();
int targetLoad = getSubClusterLoad(targetId);
if (targetLoad == -1 || !activeAndEnabledSCs.contains(targetId)) {
// Probably a SC that's not active and enabled. Forcing a reroute
targetLoad = Integer.MAX_VALUE;
}
/*
* Prepare the weight for a random draw among all known SCs.
*
* For SC with pending bigger than maxThreshold / 2, use maxThreshold /
* pending as weight. We multiplied by maxThreshold so that the weight
* won't be too small in value.
*
* For SC with pending less than maxThreshold / 2, we cap the weight at 2
* = (maxThreshold / (maxThreshold / 2)) so that SC with small pending
* will not get a huge weight and thus get swamped.
*/
for (SubClusterId sc : activeAndEnabledSCs) {
int scLoad = getSubClusterLoad(sc);
if (scLoad > targetLoad) {
// Never mind if it is not the most loaded SC
return targetId;
}
if (scLoad <= maxThreshold / 2) {
weight.add(2f);
} else {
weight.add((float) maxThreshold / scLoad);
}
scIds.add(sc);
}
if (weight.size() == 0) {
return targetId;
}
return scIds.get(FederationPolicyUtils.getWeightedRandom(weight));
} | 3.68 |
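For intuition, a standalone sketch (illustrative only, not part of the policy class) of the weighting scheme described in the comment above, assuming a maxThreshold of 100: subclusters with pending load at or below maxThreshold / 2 are capped at weight 2, heavier ones get maxThreshold / load.

import java.util.LinkedHashMap;
import java.util.Map;

public class RerouteWeightDemo {
    public static void main(String[] args) {
        int maxThreshold = 100;
        Map<String, Integer> pendingLoad = new LinkedHashMap<>();
        pendingLoad.put("sc1", 10);   // lightly loaded
        pendingLoad.put("sc2", 60);   // moderately loaded
        pendingLoad.put("sc3", 200);  // over the threshold
        for (Map.Entry<String, Integer> e : pendingLoad.entrySet()) {
            int load = e.getValue();
            float weight = load <= maxThreshold / 2 ? 2f : (float) maxThreshold / load;
            System.out.printf("%s load=%d weight=%.2f%n", e.getKey(), load, weight);
        }
        // sc1 -> 2.00 (capped), sc2 -> 1.67, sc3 -> 0.50: heavily loaded subclusters are drawn less often.
    }
}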
morf_DataSetUtils_record | /**
* Build a record.
*
* @see RecordBuilder
* @return A {@link RecordBuilder}.
*/
public static RecordBuilder record() {
return new RecordBuilderImpl();
} | 3.68 |
flink_TableChange_getValue | /** Returns the Option value to set. */
public String getValue() {
return value;
} | 3.68 |
hbase_MergeTableRegionsProcedure_createMergedRegion | /**
* Create merged region. The way the merge works is that we make a 'merges' temporary directory in
* the FIRST parent region to merge (Do not change this without also changing the rollback where
* we look in this FIRST region for the merge dir). We then collect here references to all the
* store files in all the parent regions including those of the FIRST parent region into a
* subdirectory, named for the resultant merged region. We then call commitMergeRegion. It finds
* this subdirectory of storefile references and moves them under the new merge region (creating
* the region layout as side effect). After assign of the new merge region, we will run a
* compaction. This will undo the references but the reference files remain in place until the
* archiver runs (which it does on a period as a chore in the RegionServer that hosts the merge
* region -- see CompactedHFilesDischarger). Once the archiver has moved aside the no-longer used
* references, the merge region no longer has references. The catalog janitor will notice when it
* runs next and it will remove the old parent regions.
*/
private void createMergedRegion(final MasterProcedureEnv env) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
final FileSystem fs = mfs.getFileSystem();
List<Path> mergedFiles = new ArrayList<>();
HRegionFileSystem mergeRegionFs = HRegionFileSystem
.createRegionOnFileSystem(env.getMasterConfiguration(), fs, tableDir, mergedRegion);
for (RegionInfo ri : this.regionsToMerge) {
HRegionFileSystem regionFs = HRegionFileSystem
.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tableDir, ri, false);
mergedFiles.addAll(mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion));
}
assert mergeRegionFs != null;
mergeRegionFs.commitMergedRegion(mergedFiles, env);
// Prepare to create merged regions
env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(mergedRegion)
.setState(State.MERGING_NEW);
} | 3.68 |
flink_RocksDBNativeMetricMonitor_setProperty | /** Updates the value of metricView if the reference is still valid. */
private void setProperty(RocksDBNativePropertyMetricView metricView) {
if (metricView.isClosed()) {
return;
}
try {
synchronized (lock) {
if (rocksDB != null) {
long value = rocksDB.getLongProperty(metricView.handle, metricView.property);
metricView.setValue(value);
}
}
} catch (RocksDBException e) {
metricView.close();
LOG.warn("Failed to read native metric {} from RocksDB.", metricView.property, e);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testGreatest | /**
* Test the GREATEST functionality behaves as expected
*/
@Test
public void testGreatest() {
SelectStatement testStatement = select(greatest(new NullFieldLiteral(), field("bob"))).from(tableRef("MyTable"));
assertEquals(expectedGreatest().toLowerCase(), testDialect.convertStatementToSQL(testStatement).toLowerCase());
} | 3.68 |
flink_FixedLengthRecordSorter_writeToOutput | /**
* Writes a subset of the records in this buffer in their logical order to the given output.
*
* @param output The output view to write the records to.
* @param start The logical start position of the subset.
* @param num The number of elements to write.
* @throws IOException Thrown, if an I/O exception occurred writing to the output view.
*/
@Override
public void writeToOutput(final ChannelWriterOutputView output, final int start, int num)
throws IOException {
final TypeComparator<T> comparator = this.comparator;
final TypeSerializer<T> serializer = this.serializer;
T record = this.recordInstance;
final SingleSegmentInputView inView = this.inView;
final int recordsPerSegment = this.recordsPerSegment;
int currentMemSeg = start / recordsPerSegment;
int offset = (start % recordsPerSegment) * this.recordSize;
while (num > 0) {
final MemorySegment currentIndexSegment = this.sortBuffer.get(currentMemSeg++);
inView.set(currentIndexSegment, offset);
// check whether we have a full or partially full segment
if (num >= recordsPerSegment && offset == 0) {
// full segment
for (int numInMemSeg = 0; numInMemSeg < recordsPerSegment; numInMemSeg++) {
record = comparator.readWithKeyDenormalization(record, inView);
serializer.serialize(record, output);
}
num -= recordsPerSegment;
} else {
// partially filled segment
for (;
num > 0 && offset <= this.lastEntryOffset;
num--, offset += this.recordSize) {
record = comparator.readWithKeyDenormalization(record, inView);
serializer.serialize(record, output);
}
}
offset = 0;
}
} | 3.68 |
hbase_DefaultMobStoreFlusher_performMobFlush | /**
* Flushes the cells in the mob store.
* <ol>
 * In the mob store, cells with PUT type may or may not carry mob tags.
 * <li>If a cell does not have a mob tag, where it is flushed depends on the value length. If the
 * length is larger than a threshold, the cell is flushed to a mob file and the mob file is flushed
 * to a store file in HBase. Otherwise, the cell is flushed directly to a store file in HBase.</li>
 * <li>If a cell has a mob tag, its value is a mob file name and it is flushed directly to a store
 * file in HBase.</li>
* </ol>
* @param snapshot Memstore snapshot.
* @param cacheFlushId Log cache flush sequence number.
* @param scanner The scanner of memstore snapshot.
* @param writer The store file writer.
* @param status Task that represents the flush operation and may be updated with
* status.
 * @param throughputController A controller to avoid flushing too fast.
*/
protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
InternalScanner scanner, StoreFileWriter writer, MonitoredTask status,
ThroughputController throughputController, Consumer<Path> writerCreationTracker)
throws IOException {
StoreFileWriter mobFileWriter = null;
int compactionKVMax =
conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
long mobCount = 0;
long mobSize = 0;
long time = snapshot.getTimeRangeTracker().getMax();
mobFileWriter = mobStore.getStoreEngine().requireWritingToTmpDirFirst()
? mobStore.createWriterInTmp(new Date(time), snapshot.getCellsCount(),
store.getColumnFamilyDescriptor().getCompressionType(), store.getRegionInfo().getStartKey(),
false)
: mobStore.createWriter(new Date(time), snapshot.getCellsCount(),
store.getColumnFamilyDescriptor().getCompressionType(), store.getRegionInfo().getStartKey(),
false, writerCreationTracker);
// the target path is {tableName}/.mob/{cfName}/mobFiles
// the relative path is mobFiles
byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
ScannerContext scannerContext =
ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
List<Cell> cells = new ArrayList<>();
boolean hasMore;
String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
boolean control =
throughputController != null && !store.getRegionInfo().getTable().isSystemTable();
if (control) {
throughputController.start(flushName);
}
IOException ioe = null;
// Clear all past MOB references
mobRefSet.get().clear();
try {
do {
hasMore = scanner.next(cells, scannerContext);
if (!cells.isEmpty()) {
for (Cell c : cells) {
// If we know that this KV is going to be included always, then let us
// set its memstoreTS to 0. This will help us save space when writing to
// disk.
if (
c.getValueLength() <= mobCellValueSizeThreshold || MobUtils.isMobReferenceCell(c)
|| c.getTypeByte() != KeyValue.Type.Put.getCode()
) {
writer.append(c);
} else {
// append the original keyValue in the mob file.
mobFileWriter.append(c);
mobSize += c.getValueLength();
mobCount++;
// append the tags to the KeyValue.
// The key is the same; the value is the filename of the mob file
Cell reference =
MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
writer.append(reference);
}
if (control) {
throughputController.control(flushName, c.getSerializedSize());
}
}
cells.clear();
}
} while (hasMore);
} catch (InterruptedException e) {
ioe =
new InterruptedIOException("Interrupted while control throughput of flushing " + flushName);
throw ioe;
} catch (IOException e) {
ioe = e;
throw e;
} finally {
if (control) {
throughputController.finish(flushName);
}
if (ioe != null) {
mobFileWriter.close();
}
}
if (mobCount > 0) {
// commit the mob file from temp folder to target folder.
// If the mob file is committed successfully but the store file is not,
// the committed mob file will be handled by the sweep tool as an unused
// file.
status.setStatus("Flushing mob file " + store + ": appending metadata");
mobFileWriter.appendMetadata(cacheFlushId, false, mobCount);
status.setStatus("Flushing mob file " + store + ": closing flushed file");
mobFileWriter.close();
mobStore.commitFile(mobFileWriter.getPath(), targetPath);
LOG.debug("Flush store file: {}, store: {}", writer.getPath(), getStoreInfo());
mobStore.updateMobFlushCount();
mobStore.updateMobFlushedCellsCount(mobCount);
mobStore.updateMobFlushedCellsSize(mobSize);
// Add mob reference to store file metadata
mobRefSet.get().add(mobFileWriter.getPath().getName());
} else {
try {
status.setStatus("Flushing mob file " + store + ": no mob cells, closing flushed file");
mobFileWriter.close();
// If the mob file is empty, delete it instead of committing.
store.getFileSystem().delete(mobFileWriter.getPath(), true);
} catch (IOException e) {
LOG.error("Failed to delete the temp mob file", e);
}
}
} | 3.68 |
pulsar_AbstractMetadataStore_execute | /**
* Run the task in the executor thread and fail the future if the executor is shutting down.
*/
@VisibleForTesting
public void execute(Runnable task, Supplier<List<CompletableFuture<?>>> futures) {
try {
executor.execute(task);
} catch (final Throwable t) {
futures.get().forEach(f -> f.completeExceptionally(t));
}
} | 3.68 |
flink_SingleInputPlanNode_getComparator | /**
* Gets the specified comparator from this PlanNode.
*
* @param id The ID of the requested comparator.
* @return The specified comparator.
*/
public TypeComparatorFactory<?> getComparator(int id) {
return comparators[id];
} | 3.68 |
framework_SizeWithUnit_parseStringSize | /**
* Returns an object whose numeric value and unit are taken from the string
 * s. A null or empty string will produce {-1, Unit#PIXELS}. An exception is
 * thrown if s specifies a number without a unit.
*
* @param s
* the string to be parsed
* @return an object containing the parsed value and unit
*/
public static SizeWithUnit parseStringSize(String s) {
return parseStringSize(s, null);
} | 3.68 |
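A hypothetical usage sketch of the API above (the getSize()/getUnit() accessor names are assumed for illustration, not confirmed by the source): the one-argument overload simply delegates with a null default unit.

public class SizeParseDemo {
    public static void main(String[] args) {
        SizeWithUnit percent = SizeWithUnit.parseStringSize("75%");
        SizeWithUnit empty = SizeWithUnit.parseStringSize("");   // -> {-1, Unit.PIXELS} per the Javadoc
        // getSize()/getUnit() are assumed accessor names.
        System.out.println(percent.getSize() + " " + percent.getUnit());
    }
}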
framework_AbstractMultiSelectConnector_onDataChange | /**
* This method handles the parsing of the new JSON data containing the items
* and the selection information.
*
* @param range
* the updated range, never {@code null}
*/
protected void onDataChange(Range range) {
assert range.getStart() == 0
&& range.getEnd() == getDataSource().size() : getClass()
.getSimpleName()
+ " only supports full updates, but got range " + range;
List<JsonObject> items = new ArrayList<>(range.length());
for (int i = 0; i < range.getEnd(); i++) {
items.add(getDataSource().getRow(i));
}
getMultiSelectWidget().setItems(items);
} | 3.68 |
hbase_HFileWriterImpl_finishBlock | /** Clean up the data block that is currently being written. */
private void finishBlock() throws IOException {
if (!blockWriter.isWriting() || blockWriter.blockSizeWritten() == 0) {
return;
}
// Update the first data block offset if UNSET; used when scanning.
if (firstDataBlockOffset == UNSET) {
firstDataBlockOffset = outputStream.getPos();
}
// Update the last data block offset each time through here.
lastDataBlockOffset = outputStream.getPos();
blockWriter.writeHeaderAndData(outputStream);
int onDiskSize = blockWriter.getOnDiskSizeWithHeader();
Cell indexEntry =
getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, firstCellInBlock);
dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry),
lastDataBlockOffset, onDiskSize);
totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader();
if (cacheConf.shouldCacheDataOnWrite()) {
doCacheOnWrite(lastDataBlockOffset);
}
} | 3.68 |
hbase_NettyRpcClientConfigHelper_setEventLoopConfig | /**
* Set the EventLoopGroup and channel class for {@code AsyncRpcClient}.
*/
public static void setEventLoopConfig(Configuration conf, EventLoopGroup group,
Class<? extends Channel> channelClass) {
Preconditions.checkNotNull(group, "group is null");
Preconditions.checkNotNull(channelClass, "channel class is null");
conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME);
EVENT_LOOP_CONFIG_MAP.put(CONFIG_NAME,
Pair.<EventLoopGroup, Class<? extends Channel>> newPair(group, channelClass));
} | 3.68 |
morf_InsertStatementBuilder_values | /**
* Specifies the literal field values to insert.
*
* <p>
* Each field must have an alias which specifies the column to insert into.
* </p>
*
* @see AliasedField#as(String)
* @param fieldValues Literal field values to insert.
* @return this, for method chaining.
*/
public InsertStatementBuilder values(AliasedFieldBuilder... fieldValues) {
if (fromTable != null) {
throw new UnsupportedOperationException("Cannot specify both a literal set of field values and a from table.");
}
if (selectStatement != null) {
throw new UnsupportedOperationException("Cannot specify both a literal set of field values and a sub-select statement.");
}
this.values.addAll(Builder.Helper.buildAll(Lists.newArrayList(fieldValues)));
return this;
} | 3.68 |
flink_FlinkContainersSettings_setLogProperty | /**
* Sets a single Flink logging configuration property in the log4j format and returns a
* reference to this Builder enabling method chaining.
*
* @param key The property key.
* @param value The property value.
* @return A reference to this Builder.
*/
public Builder setLogProperty(String key, String value) {
this.logProperties.setProperty(key, value);
return this;
} | 3.68 |
hadoop_IntegerSplitter_split | /**
 * Returns a list of longs with one more element than the number of input splits.
 * These values represent the boundaries between input splits.
* All splits are open on the top end, except the last one.
*
* So the list [0, 5, 8, 12, 18] would represent splits capturing the intervals:
*
* [0, 5)
* [5, 8)
* [8, 12)
* [12, 18] note the closed interval for the last split.
*/
List<Long> split(long numSplits, long minVal, long maxVal)
throws SQLException {
List<Long> splits = new ArrayList<Long>();
// Use numSplits as a hint. May need an extra task if the size doesn't
// divide cleanly.
long splitSize = (maxVal - minVal) / numSplits;
if (splitSize < 1) {
splitSize = 1;
}
long curVal = minVal;
while (curVal <= maxVal) {
splits.add(curVal);
curVal += splitSize;
}
if (splits.get(splits.size() - 1) != maxVal || splits.size() == 1) {
// We didn't end on the maxVal. Add that to the end of the list.
splits.add(maxVal);
}
return splits;
} | 3.68 |
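A self-contained walk-through (the same arithmetic as above, re-implemented for illustration) of the boundaries produced for numSplits=4 over [0, 18]; note that integer division can yield one more interval than requested.

import java.util.ArrayList;
import java.util.List;

public class SplitDemo {
    public static void main(String[] args) {
        long numSplits = 4, minVal = 0, maxVal = 18;
        long splitSize = Math.max(1, (maxVal - minVal) / numSplits); // 18 / 4 = 4
        List<Long> splits = new ArrayList<>();
        for (long cur = minVal; cur <= maxVal; cur += splitSize) {
            splits.add(cur);
        }
        if (splits.get(splits.size() - 1) != maxVal || splits.size() == 1) {
            splits.add(maxVal); // ensure the final boundary is maxVal itself
        }
        System.out.println(splits); // [0, 4, 8, 12, 16, 18] -> [0,4) [4,8) [8,12) [12,16) [16,18]
    }
}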
framework_VFlash_setCodebase | /**
* This attribute specifies the base path used to resolve relative URIs
* specified by the classid, data, and archive attributes. The default value
* is the base URI of the current document.
*
* @param codebase
* The base path
*
* @see #setClassId(String)
* @see #setArchive(String)
*/
public void setCodebase(String codebase) {
if (this.codebase != codebase) {
this.codebase = codebase;
needsRebuild = true;
}
} | 3.68 |
hbase_RegionInfoDisplay_getDescriptiveNameFromRegionStateForDisplay | /**
 * Get the descriptive name as {@link RegionState} does it, optionally hiding the start key
* @return descriptive string
*/
public static String getDescriptiveNameFromRegionStateForDisplay(RegionState state,
Configuration conf) {
if (conf.getBoolean(DISPLAY_KEYS_KEY, true)) return state.toDescriptiveString();
String descriptiveStringFromState = state.toDescriptiveString();
int idx = descriptiveStringFromState.lastIndexOf(" state=");
String regionName = getRegionNameAsStringForDisplay(
RegionInfoBuilder.newBuilder(state.getRegion()).build(), conf);
return regionName + descriptiveStringFromState.substring(idx);
} | 3.68 |
hbase_SegmentScanner_getHighest | /**
* Private internal method that returns the higher of the two key values, or null if they are both
* null
*/
private Cell getHighest(Cell first, Cell second) {
if (first == null && second == null) {
return null;
}
if (first != null && second != null) {
int compare = segment.compare(first, second);
return (compare > 0 ? first : second);
}
return (first != null ? first : second);
} | 3.68 |
hbase_HBaseServiceHandler_getAdmin | /**
* Obtain HBaseAdmin. Creates the instance if it is not already created.
*/
protected Admin getAdmin() throws IOException {
return connectionCache.getAdmin();
} | 3.68 |
graphhopper_VectorTile_hasType | /**
* <pre>
* The type of geometry stored in this feature.
* </pre>
*
* <code>optional .vector_tile.Tile.GeomType type = 3 [default = UNKNOWN];</code>
*/
public boolean hasType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
} | 3.68 |
hbase_HFileArchiveManager_getTableNode | /**
* Get the zookeeper node associated with archiving the given table
* @param table name of the table to check
* @return znode for the table's archive status
*/
private String getTableNode(byte[] table) {
return ZNodePaths.joinZNode(archiveZnode, Bytes.toString(table));
} | 3.68 |
framework_DDEventHandleStrategy_handleDragImageEvent | /**
 * Handles the event when the drag image element (the
 * {@link VDragAndDropManager#getDragElement()} return value) is not null or
 * {@code event} is a touch event.
 *
 * If this method returns {@code true}, event processing will be stopped.
*
* @param target
* target element over which DnD event has happened
* @param event
* GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
* @return {@code true} is strategy handled the event and no further steps
* to handle required.
*/
public boolean handleDragImageEvent(Element target,
NativePreviewEvent event, DDManagerMediator mediator) {
VDragAndDropManager manager = mediator.getManager();
// ApplicationConnection.getConsole().log(
// "Event on dragImage, target changed");
// special handling for events over dragImage
// pretty much all events are mousemove, although mouseover
// also kind of happens below
switch (event.getTypeInt()) {
case Event.ONMOUSEOVER:
case Event.ONMOUSEOUT:
// ApplicationConnection
// .getConsole()
// .log(
// "IGNORING proxy image event, fired because of hack or not
// significant");
return true;
case Event.ONMOUSEMOVE:
case Event.ONTOUCHMOVE:
VDropHandler findDragTarget = findDragTarget(target, mediator);
if (findDragTarget != manager.getCurrentDropHandler()) {
// dragleave on old
handleDragLeave(mediator, true);
// dragenter on new
manager.setCurrentDropHandler(findDragTarget);
handleDragEnter(target, mediator);
} else if (findDragTarget != null) {
handleDragOver(target, mediator);
}
// prevent text selection on IE
event.getNativeEvent().preventDefault();
return true;
default:
// NOP
break;
}
return false;
} | 3.68 |
flink_PojoSerializerSnapshot_getCompatibilityOfPreExistingRegisteredSubclasses | /**
 * Finds which registered subclasses exist both in the new {@link PojoSerializer} and in
 * the previous one (represented by this snapshot), and returns an {@link
 * IntermediateCompatibilityResult} for the serializers of these preexisting registered
 * subclasses.
*/
private static <T>
IntermediateCompatibilityResult<T> getCompatibilityOfPreExistingRegisteredSubclasses(
PojoSerializer<T> newPojoSerializer,
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
registeredSubclassSerializerSnapshots) {
final LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> unwrappedSerializerSnapshots =
registeredSubclassSerializerSnapshots.unwrapOptionals();
final ArrayList<TypeSerializerSnapshot<?>> associatedSubclassSerializerSnapshots =
new ArrayList<>();
final ArrayList<TypeSerializer<?>> associatedNewSubclassSerializers = new ArrayList<>();
final LinkedHashMap<Class<?>, TypeSerializer<?>> newSubclassSerializerRegistry =
newPojoSerializer.getBundledSubclassSerializerRegistry();
for (Map.Entry<Class<?>, TypeSerializerSnapshot<?>> entry :
unwrappedSerializerSnapshots.entrySet()) {
TypeSerializer<?> newRegisteredSerializer =
newSubclassSerializerRegistry.get(entry.getKey());
if (newRegisteredSerializer != null) {
associatedSubclassSerializerSnapshots.add(entry.getValue());
associatedNewSubclassSerializers.add(newRegisteredSerializer);
}
}
return CompositeTypeSerializerUtil.constructIntermediateCompatibilityResult(
associatedNewSubclassSerializers.toArray(
new TypeSerializer<?>[associatedNewSubclassSerializers.size()]),
associatedSubclassSerializerSnapshots.toArray(
new TypeSerializerSnapshot<?>
[associatedSubclassSerializerSnapshots.size()]));
} | 3.68 |
flink_ResolvedSchema_getColumnDataTypes | /**
* Returns all column data types. It does not distinguish between different kinds of columns.
*/
public List<DataType> getColumnDataTypes() {
return columns.stream().map(Column::getDataType).collect(Collectors.toList());
} | 3.68 |
hbase_KeyValue_getRowArray | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getRowArray() {
return bytes;
} | 3.68 |
framework_AbstractComponent_hasEqualWidth | /**
 * Tests if the given component has the same width as this instance
*
* @param component
* the component for the width comparison
* @return true if the widths are equal
*/
private boolean hasEqualWidth(Component component) {
return getWidth() == component.getWidth()
&& getWidthUnits().equals(component.getWidthUnits());
} | 3.68 |
hbase_Bytes_split | /**
 * Splits the passed range. A relatively expensive operation; uses BigInteger math. Useful for
 * splitting ranges for MapReduce jobs.
* @param a Beginning of range
* @param b End of range
* @param inclusive Whether the end of range is prefix-inclusive or is considered an exclusive
* boundary. Automatic splits are generally exclusive and manual splits with an
* explicit range utilize an inclusive end of range.
* @param num Number of times to split range. Pass 1 if you want to split the range in two;
* i.e. one split.
* @return Array of dividing values
*/
public static byte[][] split(final byte[] a, final byte[] b, boolean inclusive, final int num) {
byte[][] ret = new byte[num + 2][];
int i = 0;
Iterable<byte[]> iter = iterateOnSplits(a, b, inclusive, num);
if (iter == null) return null;
for (byte[] elem : iter) {
ret[i++] = elem;
}
return ret;
} | 3.68 |
querydsl_SQLTemplates_serializeModifiers | /**
* template method for LIMIT and OFFSET serialization
*
* @param metadata
* @param context
*/
protected void serializeModifiers(QueryMetadata metadata, SQLSerializer context) {
QueryModifiers mod = metadata.getModifiers();
if (mod.getLimit() != null) {
context.handle(limitTemplate, mod.getLimit());
} else if (limitRequired) {
context.handle(limitTemplate, maxLimit);
}
if (mod.getOffset() != null) {
context.handle(offsetTemplate, mod.getOffset());
}
} | 3.68 |
flink_StreamProjection_projectTuple14 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>
SingleOutputStreamOperator<
Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
projectTuple14() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> tType =
new TupleTypeInfo<
Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(
fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
morf_InsertStatement_getFieldDefaults | /**
* Gets the field defaults that should be used when inserting new fields.
*
* @return a map of field names to field default values to use during this insert.
*/
public Map<String, AliasedField> getFieldDefaults() {
return fieldDefaults;
} | 3.68 |
hbase_ByteBuff_toBytes | /**
* Copy the content from this ByteBuff to a byte[].
*/
public byte[] toBytes() {
return toBytes(0, this.limit());
} | 3.68 |
querydsl_TimeExpression_milliSecond | /**
* Create a milliseconds expression (range 0-999)
* <p>Is always 0 in JPA and JDO modules</p>
*
* @return milli second
*/
public NumberExpression<Integer> milliSecond() {
if (milliseconds == null) {
milliseconds = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.MILLISECOND, mixin);
}
return milliseconds;
} | 3.68 |
morf_SelectStatementBuilder_forUpdate | /**
* Tells the database to pessimistically lock the tables.
*
* @return this, for method chaining.
*/
public SelectStatementBuilder forUpdate() {
this.forUpdate = true;
return this;
} | 3.68 |
hbase_RequestConverter_buildGetSpaceQuotaRegionSizesRequest | /**
* Returns a {@link GetSpaceQuotaRegionSizesRequest} object.
*/
public static GetSpaceQuotaRegionSizesRequest buildGetSpaceQuotaRegionSizesRequest() {
return GetSpaceQuotaRegionSizesRequest.getDefaultInstance();
} | 3.68 |
flink_NFA_extractCurrentMatches | /**
* Extracts all the sequences of events from the start to the given computation state. An event
* sequence is returned as a map which contains the events and the names of the states to which
* the events were mapped.
*
* @param sharedBufferAccessor The accessor to {@link SharedBuffer} from which to extract the
* matches
* @param computationState The end computation state of the extracted event sequences
* @return Collection of event sequences which end in the given computation state
* @throws Exception Thrown if the system cannot access the state.
*/
private Map<String, List<EventId>> extractCurrentMatches(
final SharedBufferAccessor<T> sharedBufferAccessor,
final ComputationState computationState)
throws Exception {
if (computationState.getPreviousBufferEntry() == null) {
return new HashMap<>();
}
List<Map<String, List<EventId>>> paths =
sharedBufferAccessor.extractPatterns(
computationState.getPreviousBufferEntry(), computationState.getVersion());
if (paths.isEmpty()) {
return new HashMap<>();
}
// for a given computation state, we cannot have more than one matching patterns.
Preconditions.checkState(paths.size() == 1);
return paths.get(0);
} | 3.68 |
hbase_HFile_getSupportedCompressionAlgorithms | /**
 * Get names of supported compression algorithms. The names are accepted by HFile.Writer.
* @return Array of strings, each represents a supported compression algorithm. Currently, the
* following compression algorithms are supported.
* <ul>
* <li>"none" - No compression.
* <li>"gz" - GZIP compression.
* </ul>
*/
public static String[] getSupportedCompressionAlgorithms() {
return Compression.getSupportedAlgorithms();
} | 3.68 |
streampipes_PipelineElementMigrationManager_migratePipelineElement | /**
* Handle the migration of a pipeline element with respect to the given model migration configs.
* All applicable migrations found in the provided configs are executed for the given pipeline element.
* In case a migration fails, the related pipeline element receives the latest definition of its static properties,
* so that the pipeline element can be adapted by the user to resolve the failed migration.
*
* @param pipelineElement pipeline element to be migrated
* @param modelMigrations list of model migrations that might be applicable for this pipeline element
* @param url url of the extensions service endpoint that handles the migration
* @param failedMigrations collection of failed migrations which is extended by occurring migration failures
* @param <T> type of the pipeline element (e.g., DataProcessorInvocation)
* @return the migrated (or - in case of a failure - updated) pipeline element
*/
protected <T extends InvocableStreamPipesEntity> T migratePipelineElement(
T pipelineElement,
List<ModelMigratorConfig> modelMigrations,
String url,
List<MigrationResult<?>> failedMigrations
) {
// loop until no migrations are available anymore
// this allows to apply multiple migrations for a pipeline element sequentially
// For example, first migration from 0 to 1 and the second migration from 1 to 2
while (getApplicableMigration(pipelineElement, modelMigrations).isPresent() && failedMigrations.isEmpty()) {
var migrationConfig = getApplicableMigration(pipelineElement, modelMigrations).get();
LOG.info(
"Found applicable migration for pipeline element '{}': {}",
pipelineElement.getElementId(),
migrationConfig
);
var migrationResult = performMigration(
pipelineElement,
migrationConfig,
url
);
if (migrationResult.success()) {
LOG.info("Migration successfully performed by extensions service. Updating pipeline element invocation ...");
LOG.debug("Migration was performed at extensions service endpoint '{}'", url);
pipelineElement = migrationResult.element();
} else {
LOG.error("Migration failed with the following reason: {}", migrationResult.message());
failedMigrations.add(migrationResult);
}
}
if (!failedMigrations.isEmpty()) {
updateFailedPipelineElement(pipelineElement);
LOG.info("Updated pipeline elements with new description where automatic migration failed.");
}
return pipelineElement;
} | 3.68 |
hudi_HoodieAvroUtils_jsonBytesToAvro | /**
 * Converts JSON bytes back into an Avro record.
*/
public static GenericRecord jsonBytesToAvro(byte[] bytes, Schema schema) throws IOException {
ByteArrayInputStream bio = new ByteArrayInputStream(bytes);
JsonDecoder jsonDecoder = DecoderFactory.get().jsonDecoder(schema, bio);
GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
return reader.read(null, jsonDecoder);
} | 3.68 |
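A hedged round-trip sketch pairing the helper above with Avro's JSON encoder; the record schema and field name are invented for illustration, and the import for HoodieAvroUtils is omitted.

import java.io.ByteArrayOutputStream;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.io.JsonEncoder;

public class JsonAvroRoundTrip {
    public static void main(String[] args) throws Exception {
        Schema schema = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"Row\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
        GenericRecord record = new GenericData.Record(schema);
        record.put("id", 42L);
        // Encode the record to JSON bytes ...
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        JsonEncoder encoder = EncoderFactory.get().jsonEncoder(schema, out);
        new GenericDatumWriter<GenericRecord>(schema).write(record, encoder);
        encoder.flush();
        // ... and decode them back with the helper above.
        GenericRecord decoded = HoodieAvroUtils.jsonBytesToAvro(out.toByteArray(), schema);
        System.out.println(decoded.get("id")); // 42
    }
}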
dubbo_ApplicationModel_getName | /**
* @deprecated Replace to {@link ApplicationModel#getApplicationName()}
*/
@Deprecated
public static String getName() {
return defaultModel().getCurrentConfig().getName();
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_overwrite | /**
 * Set to true to overwrite the existing file.
 * If set to false, an exception will be thrown when calling {@link #build()}
 * if the file exists.
 *
 * @param overwrite whether to overwrite the existing file.
* @return Generics Type B.
*/
public B overwrite(boolean overwrite) {
if (overwrite) {
flags.add(CreateFlag.OVERWRITE);
} else {
flags.remove(CreateFlag.OVERWRITE);
}
return getThisBuilder();
} | 3.68 |
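A small usage sketch (the path is hypothetical) showing the builder in context: overwrite(true) adds CreateFlag.OVERWRITE, so an existing file is replaced rather than failing at build().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OverwriteDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/demo.txt"); // hypothetical path
        try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) {
            out.writeUTF("hello");
        }
    }
}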
zxing_GenericGF_multiply | /**
* @return product of a and b in GF(size)
*/
int multiply(int a, int b) {
if (a == 0 || b == 0) {
return 0;
}
return expTable[(logTable[a] + logTable[b]) % (size - 1)];
} | 3.68 |
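For intuition, a self-contained sketch (independent of ZXing's GenericGF) that builds the exp/log tables for GF(16) with the primitive polynomial x^4 + x + 1 and multiplies exactly as above; 6 and 7 happen to be inverses in this field.

public class Gf16Demo {
    public static void main(String[] args) {
        final int size = 16, primitive = 0x13;   // x^4 + x + 1
        int[] expTable = new int[size];
        int[] logTable = new int[size];
        int x = 1;
        for (int i = 0; i < size; i++) {
            expTable[i] = x;
            x <<= 1;                              // multiply by the generator alpha
            if (x >= size) {
                x ^= primitive;                   // reduce modulo the primitive polynomial
                x &= size - 1;
            }
        }
        for (int i = 0; i < size - 1; i++) {
            logTable[expTable[i]] = i;
        }
        int a = 6, b = 7;
        int product = (a == 0 || b == 0) ? 0 : expTable[(logTable[a] + logTable[b]) % (size - 1)];
        System.out.println(product);              // prints 1
    }
}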
framework_VaadinService_ensurePushAvailable | /**
* Enables push if push support is available and push has not yet been
* enabled.
*
 * If push support is not available, a warning explaining the situation will
 * be logged the first time this method is invoked.
*
* @return <code>true</code> if push can be used; <code>false</code> if push
* is not available.
*/
public boolean ensurePushAvailable() {
if (isAtmosphereAvailable()) {
return true;
} else {
if (!pushWarningEmitted) {
pushWarningEmitted = true;
getLogger().log(Level.WARNING,
Constants.ATMOSPHERE_MISSING_ERROR);
}
return false;
}
} | 3.68 |
flink_FlinkRelMetadataQuery_getRelModifiedMonotonicity | /**
* Returns the {@link RelModifiedMonotonicity} statistic.
*
* @param rel the relational expression
* @return the monotonicity for the corresponding RelNode
*/
public RelModifiedMonotonicity getRelModifiedMonotonicity(RelNode rel) {
for (; ; ) {
try {
return modifiedMonotonicityHandler.getRelModifiedMonotonicity(rel, this);
} catch (JaninoRelMetadataProvider.NoHandler e) {
modifiedMonotonicityHandler =
revise(e.relClass, FlinkMetadata.ModifiedMonotonicity.DEF);
}
}
} | 3.68 |
hbase_MetricsTableRequests_updateCheckAndPut | /**
* Update the CheckAndPut time histogram.
* @param time time it took
*/
public void updateCheckAndPut(long time) {
if (isEnableTableLatenciesMetrics()) {
checkAndPutTimeHistogram.update(time);
}
} | 3.68 |
framework_VCustomLayout_initializeHTML | /**
 * Initializes the HTML layout.
*
* @param template
* original HTML-template
* @param themeUri
* URI to the current theme
*/
public void initializeHTML(String template, String themeUri) {
// Connect body of the template to DOM
template = extractBodyAndScriptsFromTemplate(template);
// TODO prefix img src:s here with a regexp, cannot work further with IE
String relImgPrefix = WidgetUtil
.escapeAttribute(themeUri + "/layouts/");
// prefix all relative image elements to point to theme dir with a
// regexp search
template = template.replaceAll(
"<((?:img)|(?:IMG))\\s([^>]*)src=\"((?![a-z]+:)[^/][^\"]+)\"",
"<$1 $2src=\"" + relImgPrefix + "$3\"");
// also support src attributes without quotes
template = template.replaceAll(
"<((?:img)|(?:IMG))\\s([^>]*)src=[^\"]((?![a-z]+:)[^/][^ />]+)[ />]",
"<$1 $2src=\"" + relImgPrefix + "$3\"");
// also prefix relative style="...url(...)..."
template = template.replaceAll(
"(<[^>]+style=\"[^\"]*url\\()((?![a-z]+:)[^/][^\"]+)(\\)[^>]*>)",
"$1 " + relImgPrefix + "$2 $3");
getElement().setInnerHTML(template);
// Remap locations to elements
locationToElement.clear();
scanForLocations(getElement());
initImgElements();
elementWithNativeResizeFunction = DOM.getFirstChild(getElement());
if (elementWithNativeResizeFunction == null) {
elementWithNativeResizeFunction = getElement();
}
publishResizedFunction(elementWithNativeResizeFunction);
htmlInitialized = true;
} | 3.68 |
flink_TableColumn_asSummaryString | /** Returns a string that summarizes this column for printing to a console. */
public String asSummaryString() {
final StringBuilder sb = new StringBuilder();
sb.append(name);
sb.append(": ");
sb.append(type);
explainExtras()
.ifPresent(
e -> {
sb.append(" ");
sb.append(e);
});
return sb.toString();
} | 3.68 |
pulsar_AbstractTopic_updatePublishDispatcher | /**
 * Updates the publish dispatcher for this topic.
*/
public void updatePublishDispatcher() {
synchronized (topicPublishRateLimiterLock) {
PublishRate publishRate = topicPolicies.getPublishRate().get();
if (publishRate.publishThrottlingRateInByte > 0 || publishRate.publishThrottlingRateInMsg > 0) {
log.info("Enabling publish rate limiting {} on topic {}", publishRate, getName());
if (!preciseTopicPublishRateLimitingEnable) {
this.brokerService.setupTopicPublishRateLimiterMonitor();
}
if (this.topicPublishRateLimiter == null
|| this.topicPublishRateLimiter == PublishRateLimiter.DISABLED_RATE_LIMITER) {
// create new rateLimiter if rate-limiter is disabled
if (preciseTopicPublishRateLimitingEnable) {
this.topicPublishRateLimiter = new PrecisePublishLimiter(publishRate,
() -> this.enableCnxAutoRead(), brokerService.pulsar().getExecutor());
} else {
this.topicPublishRateLimiter = new PublishRateLimiterImpl(publishRate);
}
} else {
this.topicPublishRateLimiter.update(publishRate);
}
} else {
if (log.isDebugEnabled()) {
log.debug("Disabling publish throttling for {}", this.topic);
}
if (topicPublishRateLimiter != null) {
topicPublishRateLimiter.close();
}
this.topicPublishRateLimiter = PublishRateLimiter.DISABLED_RATE_LIMITER;
enableProducerReadForPublishRateLimiting();
}
}
} | 3.68 |
hudi_BaseKeyGenerator_getKey | /**
* Generate a Hoodie Key out of provided generic record.
*/
@Override
public final HoodieKey getKey(GenericRecord record) {
if (getRecordKeyFieldNames() == null || getPartitionPathFields() == null) {
throw new HoodieKeyException("Unable to find field names for record key or partition path in cfg");
}
return new HoodieKey(getRecordKey(record), getPartitionPath(record));
} | 3.68 |
hbase_RegionCoprocessorHost_preFlushScannerOpen | /**
 * Invoked before creating a StoreScanner for a flush.
*/
public ScanInfo preFlushScannerOpen(HStore store, FlushLifeCycleTracker tracker)
throws IOException {
if (coprocEnvironments.isEmpty()) {
return store.getScanInfo();
}
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preFlushScannerOpen(this, store, builder, tracker);
}
});
return builder.build();
} | 3.68 |
hadoop_ConfigurationUtils_load | /**
 * Loads configuration from an InputStream into the given Configuration.
 * <p>
 * Error handling cannibalized from <code>Configuration.loadResource()</code>.
 *
 * @param conf configuration to load into.
 * @param is inputstream to read the configuration from.
*
* @throws IOException thrown if the configuration could not be read.
*/
public static void load(Configuration conf, InputStream is) throws IOException {
conf.addResource(is);
} | 3.68 |
hmily_IndexedBinder_bindIndexed | /**
* Bind indexed elements to the supplied collection.
*
* @param root The name of the property to bind
* @param target the target bindable
* @param elementBinder the binder to use for elements
* @param aggregateType the aggregate type, may be a collection or an array
* @param elementType the element type
* @param collection the destination for results
*/
void bindIndexed(final PropertyName root,
final BindData<?> target,
final AggregateElementBinder elementBinder,
final DataType aggregateType,
final DataType elementType,
final IndexedCollectionSupplier collection) {
ConfigPropertySource source = getEnv().getSource();
ConfigProperty property = source.findProperty(root);
if (property != null) {
bindValue(collection.get(), property.getValue());
} else {
bindIndexed(source, root, elementBinder, collection, elementType);
}
} | 3.68 |
flink_PythonEnvUtils_preparePythonEnvironment | /**
* Prepares PythonEnvironment to start python process.
*
* @param config The Python configurations.
* @param entryPointScript The entry point script, optional.
* @param tmpDir The temporary directory which files will be copied to.
* @return PythonEnvironment the Python environment which will be executed in Python process.
*/
static PythonEnvironment preparePythonEnvironment(
ReadableConfig config, String entryPointScript, String tmpDir) throws IOException {
PythonEnvironment env = new PythonEnvironment();
// 1. set the path of python interpreter.
String pythonExec =
config.getOptional(PYTHON_CLIENT_EXECUTABLE)
.orElse(System.getenv(PYFLINK_CLIENT_EXECUTABLE));
if (pythonExec != null) {
env.pythonExec = pythonExec;
}
// 2. setup temporary local directory for the user files
tmpDir = new File(tmpDir).getAbsolutePath();
Path tmpDirPath = new Path(tmpDir);
tmpDirPath.getFileSystem().mkdirs(tmpDirPath);
env.tempDirectory = tmpDir;
// 3. append the internal lib files to PYTHONPATH.
if (System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) != null) {
String pythonLibDir =
System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) + File.separator + "python";
env.pythonPath =
getLibFiles(pythonLibDir).stream()
.map(p -> p.toFile().getAbsolutePath())
.collect(Collectors.joining(File.pathSeparator));
}
// 4. copy relevant python files to tmp dir and set them in PYTHONPATH.
if (config.getOptional(PYTHON_FILES).isPresent()) {
List<Path> pythonFiles =
Arrays.stream(config.get(PYTHON_FILES).split(FILE_DELIMITER))
.map(Path::new)
.collect(Collectors.toList());
addToPythonPath(env, pythonFiles);
}
// 5. set the archives directory as the working directory, then user could access the
// content of the archives via relative path
if (config.getOptional(PYTHON_ARCHIVES).isPresent()
&& (config.getOptional(PYTHON_CLIENT_EXECUTABLE).isPresent()
|| !StringUtils.isNullOrWhitespaceOnly(
System.getenv(PYFLINK_CLIENT_EXECUTABLE)))) {
env.archivesDirectory = String.join(File.separator, tmpDir, PYTHON_ARCHIVES_DIR);
// extract archives to archives directory
config.getOptional(PYTHON_ARCHIVES)
.ifPresent(
pyArchives -> {
for (String archive : pyArchives.split(FILE_DELIMITER)) {
final Path archivePath;
final String targetDirName;
final String originalFileName;
if (archive.contains(PythonDependencyUtils.PARAM_DELIMITER)) {
String[] filePathAndTargetDir =
archive.split(
PythonDependencyUtils.PARAM_DELIMITER, 2);
archivePath = new Path(filePathAndTargetDir[0]);
targetDirName = filePathAndTargetDir[1];
originalFileName = archivePath.getName();
} else {
archivePath = new Path(archive);
originalFileName = archivePath.getName();
targetDirName = originalFileName;
}
Path localArchivePath = archivePath;
try {
if (archivePath.getFileSystem().isDistributedFS()) {
localArchivePath =
new Path(
env.tempDirectory,
String.join(
File.separator,
UUID.randomUUID().toString(),
originalFileName));
FileUtils.copy(archivePath, localArchivePath, false);
}
} catch (IOException e) {
String msg =
String.format(
"Error occurred when copying %s to %s.",
archivePath, localArchivePath);
throw new RuntimeException(msg, e);
}
try {
CompressionUtils.extractFile(
localArchivePath.getPath(),
String.join(
File.separator,
env.archivesDirectory,
targetDirName),
originalFileName);
} catch (IOException e) {
throw new RuntimeException(
"Extract archives to archives directory failed.",
e);
}
}
});
}
// 4. append configured python.pythonpath to the PYTHONPATH.
if (config.getOptional(PYTHON_PATH).isPresent()) {
env.pythonPath =
String.join(
File.pathSeparator,
config.getOptional(PYTHON_PATH).get(),
env.pythonPath);
}
if (entryPointScript != null) {
addToPythonPath(env, Collections.singletonList(new Path(entryPointScript)));
}
return env;
} | 3.68 |
morf_MySqlDialect_getSqlForNow | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForNow(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForNow(Function function) {
return "UTC_TIMESTAMP()";
} | 3.68 |
hbase_ForeignException_serialize | /**
* Converts a ForeignException to an array of bytes.
* @param source the name of the external exception source
 * @param t the "local" external exception
* @return protobuf serialized version of ForeignException
*/
public static byte[] serialize(String source, Throwable t) {
GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder();
gemBuilder.setClassName(t.getClass().getName());
if (t.getMessage() != null) {
gemBuilder.setMessage(t.getMessage());
}
// set the stack trace, if there is one
List<StackTraceElementMessage> stack =
ForeignException.toStackTraceElementMessages(t.getStackTrace());
if (stack != null) {
gemBuilder.addAllTrace(stack);
}
GenericExceptionMessage payload = gemBuilder.build();
ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder();
exception.setGenericException(payload).setSource(source);
ForeignExceptionMessage eem = exception.build();
return eem.toByteArray();
} | 3.68 |
pulsar_FunctionCacheManager_unregisterFunction | /**
* Unregisters a job from the function cache manager.
*
* @param fid function id
*/
default void unregisterFunction(String fid) {
unregisterFunctionInstance(fid, null);
} | 3.68 |
framework_FlyweightRow_removeCells | /**
* Removes cell representations (i.e. removed columns) from the indicated
* cell range and updates the subsequent indexing.
*
* @param index
* start index of the range
* @param numberOfColumns
* length of the range
*/
public void removeCells(final int index, final int numberOfColumns) {
cells.subList(index, index + numberOfColumns).clear();
updateRestOfCells(index);
} | 3.68 |
flink_TimeWindowUtil_toEpochMillsForTimer | /**
* Get a timer time according to the timestamp mills and the given shift timezone.
*
* @param utcTimestampMills the timestamp mills.
* @param shiftTimeZone the timezone that the given timestamp mills has been shifted.
* @return the epoch mills.
*/
public static long toEpochMillsForTimer(long utcTimestampMills, ZoneId shiftTimeZone) {
// Long.MAX_VALUE is a flag of max watermark, directly return it
if (UTC_ZONE_ID.equals(shiftTimeZone) || Long.MAX_VALUE == utcTimestampMills) {
return utcTimestampMills;
}
if (TimeZone.getTimeZone(shiftTimeZone).useDaylightTime()) {
/*
* return the first skipped epoch mills as timer time if the time is coming the DST.
 * e.g. Los_Angeles has no timestamp 2021-03-14 02:00:00 when entering DST.
* <pre>
* 2021-03-14 00:00:00 -> epoch1 = 1615708800000L;
* 2021-03-14 01:00:00 -> epoch2 = 1615712400000L;
* 2021-03-14 03:00:00 -> epoch3 = 1615716000000L; skip one hour (2021-03-14 02:00:00)
* 2021-03-14 04:00:00 -> epoch4 = 1615719600000L;
*
* we should use the epoch3 to register timer for window that end with
* [2021-03-14 02:00:00, 2021-03-14 03:00:00] to ensure the window can be fired
* immediately once the window passed.
*
* <pre>
* 2021-03-14 00:00:00 -> epoch0 = 1615708800000L;
* 2021-03-14 01:00:00 -> epoch1 = 1615712400000L;
* 2021-03-14 02:00:00 -> epoch3 = 1615716000000L; register 1615716000000L(epoch3)
* 2021-03-14 02:59:59 -> epoch3 = 1615719599000L; register 1615716000000L(epoch3)
* 2021-03-14 03:00:00 -> epoch3 = 1615716000000L;
*/
/*
* return the larger epoch mills as timer time if the time is leaving the DST.
 * e.g. Los_Angeles has two timestamps 2021-11-07 01:00:00 when leaving DST.
* <pre>
* 2021-11-07 00:00:00 -> epoch0 = 1636268400000L; 2021-11-07 00:00:00
* 2021-11-07 01:00:00 -> epoch1 = 1636272000000L; the first local timestamp 2021-11-07 01:00:00
* 2021-11-07 01:00:00 -> epoch2 = 1636275600000L; back to local timestamp 2021-11-07 01:00:00
* 2021-11-07 02:00:00 -> epoch3 = 1636279200000L; 2021-11-07 02:00:00
*
* we should use the epoch1 + 1 hour to register timer to ensure the two hours' data can
* be fired properly.
*
* <pre>
* 2021-11-07 00:00:00 -> epoch0 = 1636268400000L;
* 2021-11-07 01:00:00 -> epoch1 = 1636272000000L; register 1636275600000L(epoch2)
* 2021-11-07 02:00:00 -> epoch3 = 1636279200000L;
*/
LocalDateTime utcTimestamp =
LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMills), UTC_ZONE_ID);
long t1 = utcTimestamp.atZone(shiftTimeZone).toInstant().toEpochMilli();
long t2 =
utcTimestamp
.plusSeconds(SECONDS_PER_HOUR)
.atZone(shiftTimeZone)
.toInstant()
.toEpochMilli();
boolean hasNoEpoch = t1 == t2;
boolean hasTwoEpochs = t2 - t1 > MILLS_PER_HOUR;
if (hasNoEpoch) {
return t1 - t1 % MILLS_PER_HOUR;
} else if (hasTwoEpochs) {
return t1 + MILLS_PER_HOUR;
} else {
return t1;
}
}
LocalDateTime utcTimestamp =
LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMills), UTC_ZONE_ID);
return utcTimestamp.atZone(shiftTimeZone).toInstant().toEpochMilli();
} | 3.68 |
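A standalone check (illustrative, using only java.time) of the DST-overlap case discussed in the comments: for America/Los_Angeles the wall-clock hour 2021-11-07 01:00 maps to two epochs an hour apart, which is why the method registers the later one (t1 + one hour).

import java.time.LocalDateTime;
import java.time.ZoneId;

public class DstOverlapDemo {
    public static void main(String[] args) {
        ZoneId la = ZoneId.of("America/Los_Angeles");
        LocalDateTime wallClock = LocalDateTime.of(2021, 11, 7, 1, 0);
        // atZone() picks the earlier offset (PDT) for the ambiguous hour.
        long t1 = wallClock.atZone(la).toInstant().toEpochMilli();
        long t2 = wallClock.plusSeconds(3600).atZone(la).toInstant().toEpochMilli();
        System.out.println(t2 - t1);         // 7200000: two real hours pass within one wall-clock hour
        System.out.println(t1 + 3_600_000L); // 1636275600000: the timer time chosen for this hour
    }
}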
framework_LayoutManager_getOuterWidthDouble | /**
* Gets the outer width (including margins, paddings and borders) of the
* given element, provided that it has been measured. These elements are
* guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* -1 is returned if the element has not been measured. If 0 is returned, it
* might indicate that the element is not attached to the DOM.
*
* @param element
* the element to get the measured size for
* @return the measured outer width (including margins, paddings and
* borders) of the element in pixels.
*/
public final double getOuterWidthDouble(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getOuterWidth();
} | 3.68 |
framework_UidlWriter_write | /**
* Writes a JSON object containing all pending changes to the given UI.
*
* @param ui
* The {@link UI} whose changes to write
* @param writer
* The writer to use
* @param async
* True if this message is sent by the server asynchronously,
* false if it is a response to a client message.
*
* @throws IOException
* If the writing fails.
*/
public void write(UI ui, Writer writer, boolean async) throws IOException {
VaadinSession session = ui.getSession();
VaadinService service = session.getService();
// Purge pending access calls as they might produce additional changes
// to write out
service.runPendingAccessTasks(session);
Set<ClientConnector> processedConnectors = new HashSet<>();
LegacyCommunicationManager manager = session.getCommunicationManager();
ClientCache clientCache = manager.getClientCache(ui);
boolean repaintAll = clientCache.isEmpty();
// Paints components
ConnectorTracker uiConnectorTracker = ui.getConnectorTracker();
getLogger().log(Level.FINE, "* Creating response to client");
while (true) {
List<ClientConnector> connectorsToProcess = new ArrayList<>();
for (ClientConnector c : uiConnectorTracker
.getDirtyVisibleConnectors()) {
if (!processedConnectors.contains(c)) {
connectorsToProcess.add(c);
}
}
if (connectorsToProcess.isEmpty()) {
break;
}
// process parents before children
Collections.sort(connectorsToProcess,
Comparator.comparingInt(conn -> {
int depth = 0;
ClientConnector connector = conn;
// this is a very fast operation, even for 100+ levels
while (connector.getParent() != null) {
++depth;
connector = connector.getParent();
}
return depth;
}));
for (ClientConnector connector : connectorsToProcess) {
// call isDirty() to find out if ConnectorTracker knows the
// connector
boolean initialized = uiConnectorTracker.isDirty(connector)
&& uiConnectorTracker
.isClientSideInitialized(connector);
processedConnectors.add(connector);
try {
connector.beforeClientResponse(!initialized);
} catch (RuntimeException e) {
manager.handleConnectorRelatedException(connector, e);
}
}
}
getLogger().log(Level.FINE, "Found " + processedConnectors.size()
+ " dirty connectors to paint");
uiConnectorTracker.setWritingResponse(true);
try {
int syncId = service.getDeploymentConfiguration()
.isSyncIdCheckEnabled()
? uiConnectorTracker.getCurrentSyncId()
: -1;
writer.write("\"" + ApplicationConstants.SERVER_SYNC_ID + "\": "
+ syncId + ", ");
if (repaintAll) {
writer.write("\"" + ApplicationConstants.RESYNCHRONIZE_ID
+ "\": true, ");
}
int nextClientToServerMessageId = ui
.getLastProcessedClientToServerId() + 1;
writer.write("\"" + ApplicationConstants.CLIENT_TO_SERVER_ID
+ "\": " + nextClientToServerMessageId + ", ");
writer.write("\"changes\" : ");
JsonPaintTarget paintTarget = new JsonPaintTarget(manager, writer,
!repaintAll);
new LegacyUidlWriter().write(ui, writer, paintTarget);
paintTarget.close();
writer.write(", "); // close changes
// send shared state to client
// for now, send the complete state of all modified and new
// components
// Ideally, all this would be sent before "changes", but that causes
// complications with legacy components that create sub-components
// in their paint phase. Nevertheless, this will be processed on the
// client after component creation but before legacy UIDL
// processing.
writer.write("\"state\":");
Set<String> stateUpdateConnectors = new SharedStateWriter()
.write(ui, writer);
writer.write(", "); // close states
// TODO This should be optimized. The type only needs to be
// sent once for each connector id + on refresh. Use the same cache
// as
// widget mapping
writer.write("\"types\":");
new ConnectorTypeWriter().write(ui, writer, paintTarget);
writer.write(", "); // close states
// Send update hierarchy information to the client.
// This could be optimized as well to send only info if the hierarchy has
// actually changed. Much like with the shared state. Note though
// that an empty hierarchy is information as well (e.g. change from 1
// child to 0 children)
writer.write("\"hierarchy\":");
new ConnectorHierarchyWriter().write(ui, writer,
stateUpdateConnectors);
writer.write(", "); // close hierarchy
// send server to client RPC calls for components in the UI, in call
// order
// collect RPC calls from components in the UI in the order in
// which they were performed, remove the calls from components
writer.write("\"rpc\" : ");
new ClientRpcWriter().write(ui, writer);
writer.write(", "); // close rpc
uiConnectorTracker.markAllConnectorsClean();
writer.write("\"meta\" : ");
SystemMessages messages = ui.getSession().getService()
.getSystemMessages(ui.getLocale(), null);
// TODO hilightedConnector
new MetadataWriter().write(ui, writer, repaintAll, async, messages);
writer.write(", ");
writer.write("\"resources\" : ");
new ResourceWriter().write(ui, writer, paintTarget);
Collection<Class<? extends ClientConnector>> usedClientConnectors = paintTarget
.getUsedClientConnectors();
boolean typeMappingsOpen = false;
List<Class<? extends ClientConnector>> newConnectorTypes = new ArrayList<>();
for (Class<? extends ClientConnector> class1 : usedClientConnectors) {
if (clientCache.cache(class1)) {
// client does not know the mapping key for this type, send
// mapping to client
newConnectorTypes.add(class1);
if (!typeMappingsOpen) {
typeMappingsOpen = true;
writer.write(", \"typeMappings\" : { ");
} else {
writer.write(" , ");
}
String canonicalName = class1.getCanonicalName();
writer.write("\"");
writer.write(canonicalName);
writer.write("\" : ");
writer.write(manager.getTagForType(class1));
}
}
if (typeMappingsOpen) {
writer.write(" }");
}
// TODO PUSH Refactor to TypeInheritanceWriter or something
boolean typeInheritanceMapOpen = false;
if (typeMappingsOpen) {
// send the whole type inheritance map if any new mappings
for (Class<? extends ClientConnector> class1 : usedClientConnectors) {
if (!ClientConnector.class
.isAssignableFrom(class1.getSuperclass())) {
continue;
}
if (!typeInheritanceMapOpen) {
typeInheritanceMapOpen = true;
writer.write(", \"typeInheritanceMap\" : { ");
} else {
writer.write(" , ");
}
writer.write("\"");
writer.write(manager.getTagForType(class1));
writer.write("\" : ");
writer.write(manager.getTagForType(
(Class<? extends ClientConnector>) class1
.getSuperclass()));
}
if (typeInheritanceMapOpen) {
writer.write(" }");
}
}
// TODO Refactor to DependencyWriter or something
/*
* Ensure super classes come before sub classes to get script
* dependency order right. Sub class @JavaScript might assume that
*
* @JavaScript defined by super class is already loaded.
*/
Collections.sort(newConnectorTypes, new Comparator<Class<?>>() {
@Override
public int compare(Class<?> o1, Class<?> o2) {
// TODO optimize using Class.isAssignableFrom?
return hierarchyDepth(o1) - hierarchyDepth(o2);
}
private int hierarchyDepth(Class<?> type) {
if (type == Object.class) {
return 0;
} else {
return hierarchyDepth(type.getSuperclass()) + 1;
}
}
});
List<Dependency> dependencies = new ArrayList<>();
dependencies.addAll(ui.getPage().getPendingDependencies());
dependencies.addAll(Dependency.findDependencies(newConnectorTypes,
manager, new FilterContext(session)));
// Include dependencies in output if there are any
if (!dependencies.isEmpty()) {
writer.write(", \"dependencies\": "
+ JsonUtil.stringify(toJsonArray(dependencies)));
}
session.getDragAndDropService().printJSONResponse(writer);
for (ClientConnector connector : processedConnectors) {
uiConnectorTracker.markClientSideInitialized(connector);
}
assert (uiConnectorTracker.getDirtyConnectors()
.isEmpty()) : "Connectors have been marked as dirty during the end of the paint phase. This is most certainly not intended.";
writePerformanceData(ui, writer);
} finally {
uiConnectorTracker.setWritingResponse(false);
uiConnectorTracker.cleanConnectorMap(true);
}
} | 3.68 |
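Read top to bottom, the write() calls above assemble a single JSON object; the sketch below lists the top-level keys in the order they are emitted (orientation only — the sync and client-to-server id keys are taken from ApplicationConstants constants, so their literal names are not shown here).
// Orientation only: top-level UIDL keys in emission order, per the snippet above.
// ApplicationConstants.SERVER_SYNC_ID, RESYNCHRONIZE_ID (when repaintAll) and
// CLIENT_TO_SERVER_ID are written first, followed by:
String[] uidlKeyOrder = {
        "changes", "state", "types", "hierarchy", "rpc", "meta", "resources",
        "typeMappings",       // only when previously unseen connector types are used
        "typeInheritanceMap", // only when typeMappings was written
        "dependencies"        // only when there are pending dependencies
};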
hadoop_ServiceLauncher_extractCommandOptions | /**
* Extract the command options and apply them to the configuration,
* building an array of processed arguments to hand down to the service.
*
* @param conf configuration to update.
* @param args main arguments. {@code args[0]}is assumed to be
* the service classname and is skipped.
* @return the remaining arguments
* @throws ExitUtil.ExitException if JVM exiting is disabled.
*/
public List<String> extractCommandOptions(Configuration conf,
List<String> args) {
int size = args.size();
if (size <= 1) {
return Collections.emptyList();
}
List<String> coreArgs = args.subList(1, size);
return parseCommandArgs(conf, coreArgs);
} | 3.68 |
hadoop_DatanodeAdminProperties_setHostName | /**
* Set the host name of the datanode.
* @param hostName the host name of the datanode.
*/
public void setHostName(final String hostName) {
this.hostName = hostName;
} | 3.68 |
hbase_HBaseTestingUtility_killMiniHBaseCluster | /**
* Abruptly shuts down the HBase mini cluster. Does not shut down ZooKeeper or DFS if they are running.
* @throws java.io.IOException throws in case command is unsuccessful
*/
public void killMiniHBaseCluster() throws IOException {
cleanup();
if (this.hbaseCluster != null) {
getMiniHBaseCluster().killAll();
this.hbaseCluster = null;
}
if (zooKeeperWatcher != null) {
zooKeeperWatcher.close();
zooKeeperWatcher = null;
}
} | 3.68 |
framework_UIDL_getFloatAttribute | /**
* Gets the named attribute as a float.
*
* @param name
* the name of the attribute to get
* @return the attribute value
*/
public float getFloatAttribute(String name) {
return (float) attr().getRawNumber(name);
} | 3.68 |
shardingsphere-elasticjob_ExecutionService_registerJobCompleted | /**
* Register job completed.
*
* @param shardingContexts sharding contexts
*/
public void registerJobCompleted(final ShardingContexts shardingContexts) {
JobRegistry.getInstance().setJobRunning(jobName, false);
if (!configService.load(true).isMonitorExecution()) {
return;
}
for (int each : shardingContexts.getShardingItemParameters().keySet()) {
jobNodeStorage.removeJobNodeIfExisted(ShardingNode.getRunningNode(each));
}
} | 3.68 |
dubbo_ConcurrentHashSet_clear | /**
* Removes all of the elements from this set. The set will be empty after
* this call returns.
*/
@Override
public void clear() {
map.clear();
} | 3.68 |
framework_EventCellReference_isBody | /**
* Is the cell reference for a cell in the body of the Grid.
*
* @since 7.5
* @return <code>true</code> if referenced cell is in the body,
* <code>false</code> if not
*/
public boolean isBody() {
return section == Section.BODY;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setMaxThreadInfoDepth | /**
* Sets {@code delayBetweenSamples}.
*
* @param maxThreadInfoDepth Limit for the depth of the stack traces included when sampling
* threads.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setMaxThreadInfoDepth(int maxThreadInfoDepth) {
this.maxThreadInfoDepth = maxThreadInfoDepth;
return this;
} | 3.68 |
flink_SecurityOptions_forProvider | /**
* Returns a view over the given configuration via which options can be set/retrieved for the
* given provider.
*
* <pre>
* Configuration config = ...
* SecurityOptions.forProvider(config, "my_provider")
* .set(SecurityOptions.DELEGATION_TOKEN_PROVIDER_ENABLED, false)
* ...
* </pre>
*
* @param configuration backing configuration
* @param providerName provider name
* @return view over configuration
*/
@Experimental
public static Configuration forProvider(Configuration configuration, String providerName) {
return new DelegatingConfiguration(
configuration, DelegationTokenProvider.CONFIG_PREFIX + "." + providerName + ".");
} | 3.68 |
flink_TableChange_getColumnName | /** Returns the column name. */
public String getColumnName() {
return columnName;
} | 3.68 |
hadoop_Sets_intersection | /**
* Returns the intersection of two sets as an unmodifiable set.
* The returned set contains all elements that are contained by both backing
* sets.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets based
* on different equivalence relations (as {@code HashSet}, {@code TreeSet},
* and the keySet of an {@code IdentityHashMap} all are).
*
* @param set1 set1.
* @param set2 set2.
* @param <E> Generics Type E.
* @return an unmodifiable set containing the intersection of {@code set1} and {@code set2}.
*/
public static <E> Set<E> intersection(final Set<E> set1,
final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new HashSet<>(set1);
newSet.retainAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.68 |
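A minimal usage sketch for the helper above (the enclosing Sets class is assumed to be org.apache.hadoop.util.Sets; treat the import as an assumption):
// Sketch only. Assumes: import java.util.*; plus the Sets helper shown above.
static void intersectionDemo() {
    Set<Integer> set1 = new HashSet<>(Arrays.asList(1, 2, 3));
    Set<Integer> set2 = new HashSet<>(Arrays.asList(2, 3, 4));
    Set<Integer> both = Sets.intersection(set1, set2); // contains exactly 2 and 3
    // both.add(5); // would throw UnsupportedOperationException - the result is unmodifiable
}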
framework_VScrollTable_isInViewPort | /**
* Detects whether the row is visible in the table's viewport.
*
* @return {@code true} if the row is at least partially visible in the
*         viewport, {@code false} otherwise
*/
public boolean isInViewPort() {
int absoluteTop = getAbsoluteTop();
int absoluteBottom = absoluteTop + getOffsetHeight();
int viewPortTop = scrollBodyPanel.getAbsoluteTop();
int viewPortBottom = viewPortTop
+ scrollBodyPanel.getOffsetHeight();
return absoluteBottom > viewPortTop
&& absoluteTop < viewPortBottom;
} | 3.68 |
flink_TypeInferenceExtractor_forScalarFunction | /** Extracts a type inference from a {@link ScalarFunction}. */
public static TypeInference forScalarFunction(
DataTypeFactory typeFactory, Class<? extends ScalarFunction> function) {
final FunctionMappingExtractor mappingExtractor =
new FunctionMappingExtractor(
typeFactory,
function,
UserDefinedFunctionHelper.SCALAR_EVAL,
createParameterSignatureExtraction(0),
null,
createReturnTypeResultExtraction(),
createParameterAndReturnTypeVerification());
return extractTypeInference(mappingExtractor);
} | 3.68 |
flink_PekkoRpcActor_handleCallAsync | /**
* Handle asynchronous {@link Callable}. This method simply executes the given {@link Callable}
* in the context of the actor thread.
*
* @param callAsync Call async message
*/
private void handleCallAsync(CallAsync callAsync) {
try {
Object result =
runWithContextClassLoader(
() -> callAsync.getCallable().call(), flinkClassLoader);
getSender().tell(new Status.Success(result), getSelf());
} catch (Throwable e) {
getSender().tell(new Status.Failure(e), getSelf());
}
} | 3.68 |
shardingsphere-elasticjob_FailoverService_removeFailoverInfo | /**
* Remove failover info.
*/
public void removeFailoverInfo() {
for (String each : jobNodeStorage.getJobNodeChildrenKeys(ShardingNode.ROOT)) {
jobNodeStorage.removeJobNodeIfExisted(FailoverNode.getExecutionFailoverNode(Integer.parseInt(each)));
}
} | 3.68 |
flink_StreamExecutionEnvironment_addOperator | /**
* Adds an operator to the list of operators that should be executed when calling {@link
* #execute}.
*
* <p>When calling {@link #execute()} only the operators that were previously added to the list
* are executed.
*
* <p>This is not meant to be used by users. The API methods that create operators must call
* this method.
*/
@Internal
public void addOperator(Transformation<?> transformation) {
Preconditions.checkNotNull(transformation, "transformation must not be null.");
this.transformations.add(transformation);
} | 3.68 |
flink_DataSet_cross | /**
* Initiates a Cross transformation.
*
* <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
* DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
* Cartesian product.
*
* <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps
* each pair of crossed elements into a {@link Tuple2}, with the element of the first input
* being the first field of the tuple and the element of the second input being the second field
* of the tuple.
*
* <p>Call {@link
* org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)}
* to define a {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
* each pair of crossed elements. The CrossFunction returns exactly one element for each pair
* of input elements.
*
* @param other The other DataSet with which this DataSet is crossed.
* @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
* @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
* @see org.apache.flink.api.common.functions.CrossFunction
* @see DataSet
* @see Tuple2
*/
public <R> CrossOperator.DefaultCross<T, R> cross(DataSet<R> other) {
return new CrossOperator.DefaultCross<>(
this, other, CrossHint.OPTIMIZER_CHOOSES, Utils.getCallLocationName());
} | 3.68 |
hibernate-validator_PredefinedScopeBeanMetaDataManager_getBeanConfigurationForHierarchy | /**
* Returns a list with the configurations for all types contained in the given type's hierarchy (including
* implemented interfaces) starting at the specified type.
*
* @param beanClass The type of interest.
* @param <T> The type of the class to get the configurations for.
* @return A list with the configurations for the complete hierarchy of the given type. May be empty, but never
* {@code null}.
*/
private static <T> List<BeanConfiguration<? super T>> getBeanConfigurationForHierarchy(MetaDataProvider provider, Class<T> beanClass) {
List<BeanConfiguration<? super T>> configurations = newArrayList();
for ( Class<? super T> clazz : ClassHierarchyHelper.getHierarchy( beanClass ) ) {
BeanConfiguration<? super T> configuration = provider.getBeanConfiguration( clazz );
if ( configuration != null ) {
configurations.add( configuration );
}
}
return configurations;
} | 3.68 |
framework_TableQuery_executeUpdateReturnKeys | /**
* Executes the given update query string using either the active connection
* if a transaction is already open, or a new connection from this query's
* connection pool.
*
* Additionally adds a new RowIdChangeEvent to the event buffer.
*
* @param sh
* an instance of StatementHelper, containing the query string
* and parameter values.
* @param row
* the row item to update
* @return Number of affected rows
* @throws SQLException
*/
private int executeUpdateReturnKeys(StatementHelper sh, RowItem row)
throws SQLException {
PreparedStatement pstmt = null;
ResultSet genKeys = null;
Connection connection = null;
try {
connection = getConnection();
pstmt = connection.prepareStatement(sh.getQueryString(),
primaryKeyColumns.toArray(new String[0]));
sh.setParameterValuesToStatement(pstmt);
getLogger().log(Level.FINE, "DB -> {0}", sh.getQueryString());
int result = pstmt.executeUpdate();
genKeys = pstmt.getGeneratedKeys();
RowId newId = getNewRowId(row, genKeys);
bufferedEvents.add(new RowIdChangeEvent(row.getId(), newId));
return result;
} finally {
releaseConnection(connection, pstmt, genKeys);
}
} | 3.68 |
framework_VTooltip_getQuickOpenTimeout | /**
* Returns the time (in ms) during which {@link #getQuickOpenDelay()} should
* be used instead of {@link #getOpenDelay()}. The quick open delay is used
* when the tooltip has very recently been shown, is currently hidden but
* about to be shown again.
*
* @return The quick open timeout (in ms)
*/
public int getQuickOpenTimeout() {
return quickOpenTimeout;
} | 3.68 |
hudi_StreamerUtil_metaClientForReader | /**
* Creates the meta client for reader.
*
* <p>The streaming pipeline process is long-running, so empty table path is allowed,
* the reader would then check and refresh the meta client.
*
* @see org.apache.hudi.source.StreamReadMonitoringFunction
*/
public static HoodieTableMetaClient metaClientForReader(
Configuration conf,
org.apache.hadoop.conf.Configuration hadoopConf) {
final String basePath = conf.getString(FlinkOptions.PATH);
if (conf.getBoolean(FlinkOptions.READ_AS_STREAMING) && !tableExists(basePath, hadoopConf)) {
return null;
} else {
return createMetaClient(basePath, hadoopConf);
}
} | 3.68 |
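A hedged usage sketch showing the null contract for streaming reads of a not-yet-initialized table (option keys come from FlinkOptions as in the snippet; the Hadoop configuration construction is simplified):
// Sketch only - assumes Flink Configuration, Hudi FlinkOptions/StreamerUtil/HoodieTableMetaClient imports.
static void readerMetaClientDemo() {
    org.apache.flink.configuration.Configuration conf = new org.apache.flink.configuration.Configuration();
    conf.setString(FlinkOptions.PATH, "/tmp/hudi/demo_table");
    conf.setBoolean(FlinkOptions.READ_AS_STREAMING, true);

    HoodieTableMetaClient metaClient =
            StreamerUtil.metaClientForReader(conf, new org.apache.hadoop.conf.Configuration());
    if (metaClient == null) {
        // Streaming read of a table that does not exist yet:
        // the monitoring function is expected to re-check and refresh later.
    }
}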
flink_HiveParserUnparseTranslator_addTranslation | /**
* Register a translation to be performed as part of unparse. ANTLR imposes strict conditions on
* the translations and errors out during TokenRewriteStream.toString() if there is an overlap.
* It expects all the translations to be disjoint (See HIVE-2439). If the translation overlaps
* with any previously registered translation, then it must be either identical or a prefix (in
* which cases it is ignored), or else it must extend the existing translation (i.e. the
* existing translation must be a prefix/suffix of the new translation). All other overlap cases
* result in assertion failures.
*
* @param node target node whose subtree is to be replaced
* @param replacementText text to use as replacement
*/
public void addTranslation(HiveParserASTNode node, String replacementText) {
if (!enabled) {
return;
}
if (node.getOrigin() != null) {
// This node was parsed while loading the definition of another view
// being referenced by the one being created, and we don't want
// to track any expansions for the underlying view.
return;
}
int tokenStartIndex = node.getTokenStartIndex();
int tokenStopIndex = node.getTokenStopIndex();
if (tokenStopIndex < 0) {
// this is for artificially added tokens
return;
}
Translation translation = new Translation();
translation.tokenStopIndex = tokenStopIndex;
translation.replacementText = replacementText;
// Sanity check for overlap with regions already being expanded
assert (tokenStopIndex >= tokenStartIndex);
List<Integer> subsetEntries = new ArrayList<>();
// Check whether the existing entries and the new entry are subsets of one another
for (Map.Entry<Integer, Translation> existingEntry :
translations.headMap(tokenStopIndex, true).entrySet()) {
// check if the new entry contains the existing
if (existingEntry.getValue().tokenStopIndex <= tokenStopIndex
&& existingEntry.getKey() >= tokenStartIndex) {
// collect the existing entry if the newer entry is a super-set of it,
assert (replacementText.contains(existingEntry.getValue().replacementText));
subsetEntries.add(existingEntry.getKey());
// check if the existing entry contains the new
} else if (existingEntry.getValue().tokenStopIndex >= tokenStopIndex
&& existingEntry.getKey() <= tokenStartIndex) {
assert (existingEntry.getValue().replacementText.contains(replacementText));
// we don't need to add this new entry since there's already an overlapping one
return;
}
}
// remove any existing entries that are contained by the new one
for (Integer index : subsetEntries) {
translations.remove(index);
}
// It's all good: create a new entry in the map (or update existing one)
translations.put(tokenStartIndex, translation);
} | 3.68 |
flink_ConfigurationParserUtils_getSlot | /**
* Parses the configuration to get the number of slots and validates the value.
*
* @param configuration configuration object
* @return the number of slots in task manager
*/
public static int getSlot(Configuration configuration) {
int slots = configuration.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, 1);
// we need this because many configs have been written with a "-1" entry
if (slots == -1) {
slots = 1;
}
ConfigurationParserUtils.checkConfigParameter(
slots >= 1,
slots,
TaskManagerOptions.NUM_TASK_SLOTS.key(),
"Number of task slots must be at least one.");
return slots;
} | 3.68 |
hbase_ReadOnlyConfiguration_setAllowNullValueProperties | // Do not add @Override because it is not in Hadoop 2.6.5
public void setAllowNullValueProperties(boolean val) {
throw new UnsupportedOperationException("Read-only Configuration");
} | 3.68 |
flink_DagConnection_getShipStrategy | /**
* Gets the shipping strategy for this connection.
*
* @return The connection's shipping strategy.
*/
public ShipStrategyType getShipStrategy() {
return this.shipStrategy;
} | 3.68 |
streampipes_DataStreamBuilder_format | /**
* Assigns a new {@link org.apache.streampipes.model.grounding.TransportFormat} to the stream definition.
*
* @param format The transport format of the stream at runtime (e.g., JSON or Thrift).
* Use {@link org.apache.streampipes.sdk.helpers.Formats} to use some pre-defined formats
* (or create a new format as described in the developer guide).
* @return this
*/
public DataStreamBuilder format(TransportFormat format) {
this.eventGrounding.setTransportFormats(Collections.singletonList(format));
return this;
} | 3.68 |
dubbo_ReflectUtils_desc2class | /**
* desc to class.
* "[Z" => boolean[].class
* "[[Ljava/util/Map;" => java.util.Map[][].class
*
* @param cl ClassLoader instance.
* @param desc desc.
* @return Class instance.
* @throws ClassNotFoundException
*/
private static Class<?> desc2class(ClassLoader cl, String desc) throws ClassNotFoundException {
switch (desc.charAt(0)) {
case JVM_VOID:
return void.class;
case JVM_BOOLEAN:
return boolean.class;
case JVM_BYTE:
return byte.class;
case JVM_CHAR:
return char.class;
case JVM_DOUBLE:
return double.class;
case JVM_FLOAT:
return float.class;
case JVM_INT:
return int.class;
case JVM_LONG:
return long.class;
case JVM_SHORT:
return short.class;
case 'L':
// "Ljava/lang/Object;" ==> "java.lang.Object"
desc = desc.substring(1, desc.length() - 1).replace('/', '.');
break;
case '[':
// "[[Ljava/lang/Object;" ==> "[[Ljava.lang.Object;"
desc = desc.replace('/', '.');
break;
default:
throw new ClassNotFoundException("Class not found: " + desc);
}
if (cl == null) {
cl = ClassUtils.getClassLoader();
}
return Class.forName(desc, true, cl);
} | 3.68 |
dubbo_StringToDurationConverter_detectAndParse | /**
* Detect the style then parse the value to return a duration.
*
* @param value the value to parse
* @param unit the duration unit to use if the value doesn't specify one ({@code null}
* will default to ms)
* @return the parsed duration
* @throws IllegalArgumentException if the value is not a known style or cannot be
* parsed
*/
public static Duration detectAndParse(String value, ChronoUnit unit) {
return detect(value).parse(value, unit);
} | 3.68 |
hadoop_VolumeFailureInfo_getEstimatedCapacityLost | /**
* Returns an estimate of the capacity lost. This is said to be an estimate, because
* in some cases it's impossible to know the capacity of the volume, such as if
* we never had a chance to query its capacity before the failure occurred.
*
* @return estimate of capacity lost in bytes
*/
public long getEstimatedCapacityLost() {
return this.estimatedCapacityLost;
} | 3.68 |
querydsl_CollectionUtils_isUnmodifiableType | /**
* Returns true if the type is a known unmodifiable type.
*
* @param clazz the type
* @return true if the type is a known unmodifiable type
*/
public static boolean isUnmodifiableType(Class<?> clazz) {
for (; clazz != null; clazz = clazz.getSuperclass()) {
if (UNMODIFIABLE_TYPES.contains(clazz)) {
return true;
}
}
return false;
} | 3.68 |
querydsl_SQLExpressions_right | /**
* Get the rhs rightmost characters of lhs
*
* @param lhs string
* @param rhs character amount
* @return rhs rightmost characters
*/
public static StringExpression right(Expression<String> lhs, Expression<Integer> rhs) {
return Expressions.stringOperation(Ops.StringOps.RIGHT, lhs, rhs);
} | 3.68 |
zilla_HpackHuffman_transition | // Build one Node x byte transition
private static void transition(Node node, int b)
{
Node cur = node;
String str = node.symbols[b];
for (int i = 7; i >= 0; i--)
{
int bit = (b >>> i) & 0x01; // Using MSB to traverse
cur = bit == 0 ? cur.left : cur.right;
if (cur == null || cur.symbol == 256) // EOS is invalid in sequence
{
return;
}
if (cur.symbol != -1) // Can have two symbols in a byte traversal
{
str = (str == null) ? "" + (char) cur.symbol : str + (char) cur.symbol;
cur = ROOT;
}
}
node.transitions[b] = cur;
node.symbols[b] = str;
}
/*
// Searches the huffman tree for a code
private static int search(Node node, int code, int length) {
for(int i=length-1; i >= 0; i--) {
int bit = ((code >>> i) & 0x01);
if (bit == 0) {
node = node.left;
} else {
node = node.right;
}
if (node == null) {
throw new RuntimeException("Not there");
}
}
return node.sym;
}
// Decodes by traversing huffman tree by single bits
public static void decode(Node root, DirectBuffer buf, int offset, int length) {
StringBuilder sb = new StringBuilder();
Node cur = root;
for (int k = 0; k < length; k++) {
byte b = buf.getByte(offset + k);
for (int i = 7; i >= 0; i--) {
int bit = ((b >>> i) & 0x01);
System.out.print(bit+" ");
if (bit == 0) {
cur = cur.left;
} else {
cur = cur.right;
}
if (cur == null) {
throw new RuntimeException("Not there, but parsed until " + sb.toString());
}
if (cur.sym != -1) {
sb.append((char)cur.sym);
cur = root;
}
}
System.out.println();
}
System.out.println(sb.toString());
} | 3.68 |
flink_HiveTableUtil_createResolvedSchema | /** Create a Flink's ResolvedSchema from Hive table's columns and partition keys. */
public static ResolvedSchema createResolvedSchema(
List<FieldSchema> nonPartCols,
List<FieldSchema> partitionKeys,
Set<String> notNullColumns,
@Nullable UniqueConstraint primaryKey) {
Tuple2<String[], DataType[]> columnInformation =
getColumnInformation(nonPartCols, partitionKeys, notNullColumns, primaryKey);
return new ResolvedSchema(
IntStream.range(0, columnInformation.f0.length)
.mapToObj(
i ->
Column.physical(
columnInformation.f0[i], columnInformation.f1[i]))
.collect(Collectors.toList()),
Collections.emptyList(),
primaryKey == null
? null
: org.apache.flink.table.catalog.UniqueConstraint.primaryKey(
primaryKey.getName(), primaryKey.getColumns()));
} | 3.68 |
hudi_HoodieTableMetaClient_validateTableProperties | /**
* Validate table properties.
*
* @param properties Properties from writeConfig.
*/
public void validateTableProperties(Properties properties) {
// Once meta fields are disabled, they cannot be re-enabled for a given table.
if (!getTableConfig().populateMetaFields()
&& Boolean.parseBoolean((String) properties.getOrDefault(HoodieTableConfig.POPULATE_META_FIELDS.key(), HoodieTableConfig.POPULATE_META_FIELDS.defaultValue().toString()))) {
throw new HoodieException(HoodieTableConfig.POPULATE_META_FIELDS.key() + " already disabled for the table. Can't be re-enabled back");
}
// Meta fields can be disabled only when {@code SimpleKeyGenerator}, {@code ComplexKeyGenerator}, or {@code NonpartitionedKeyGenerator} is used
if (!getTableConfig().populateMetaFields()) {
String keyGenClass = KeyGeneratorType.getKeyGeneratorClassName(new HoodieConfig(properties));
if (StringUtils.isNullOrEmpty(keyGenClass)) {
keyGenClass = "org.apache.hudi.keygen.SimpleKeyGenerator";
}
if (!keyGenClass.equals("org.apache.hudi.keygen.SimpleKeyGenerator")
&& !keyGenClass.equals("org.apache.hudi.keygen.NonpartitionedKeyGenerator")
&& !keyGenClass.equals("org.apache.hudi.keygen.ComplexKeyGenerator")) {
throw new HoodieException("Only simple, non-partitioned or complex key generator are supported when meta-fields are disabled. Used: " + keyGenClass);
}
}
//Check to make sure it's not a COW table with consistent hashing bucket index
if (tableType == HoodieTableType.COPY_ON_WRITE) {
String indexType = properties.getProperty("hoodie.index.type");
if (indexType != null && indexType.equals("BUCKET")) {
String bucketEngine = properties.getProperty("hoodie.index.bucket.engine");
if (bucketEngine != null && bucketEngine.equals("CONSISTENT_HASHING")) {
throw new HoodieException("Consistent hashing bucket index does not work with COW table. Use simple bucket index or an MOR table.");
}
}
}
} | 3.68 |
morf_ConnectionResourcesBean_getSchemaName | /**
* @see org.alfasoftware.morf.jdbc.ConnectionResources#getSchemaName()
*/
@Override
public String getSchemaName() {
return schemaName;
} | 3.68 |