name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_TaskExecutorRegistrationSuccess_getClusterInformation | /** Gets the cluster information. */
public ClusterInformation getClusterInformation() {
return clusterInformation;
} | 3.68 |
flink_JsonFormatOptionsUtil_getMapNullKeyMode | /**
* Creates handling mode for null key map data.
*
* <p>See {@link #JSON_MAP_NULL_KEY_MODE_FAIL}, {@link #JSON_MAP_NULL_KEY_MODE_DROP}, and {@link
* #JSON_MAP_NULL_KEY_MODE_LITERAL} for more information.
*/
public static JsonFormatOptions.MapNullKeyMode getMapNullKeyMode(ReadableConfig config) {
String mapNullKeyMode = config.get(MAP_NULL_KEY_MODE);
switch (mapNullKeyMode.toUpperCase()) {
case JSON_MAP_NULL_KEY_MODE_FAIL:
return JsonFormatOptions.MapNullKeyMode.FAIL;
case JSON_MAP_NULL_KEY_MODE_DROP:
return JsonFormatOptions.MapNullKeyMode.DROP;
case JSON_MAP_NULL_KEY_MODE_LITERAL:
return JsonFormatOptions.MapNullKeyMode.LITERAL;
default:
throw new TableException(
String.format(
"Unsupported map null key handling mode '%s'. Validator should have checked that.",
mapNullKeyMode));
}
} | 3.68 |
streampipes_PipelineElementMigrationManager_updateFailedPipelineElement | /**
* Update the static properties of the failed pipeline element with its description.
 * This allows the failed pipeline element to be adapted in the UI to recover from the failed migration.
*
* @param pipelineElement pipeline element with failed migration
*/
protected void updateFailedPipelineElement(InvocableStreamPipesEntity pipelineElement) {
List<StaticProperty> updatedStaticProperties = new ArrayList<>();
if (pipelineElement instanceof DataProcessorInvocation) {
updatedStaticProperties = dataProcessorStorage
.getFirstDataProcessorByAppId(pipelineElement.getAppId())
.getStaticProperties();
} else if (pipelineElement instanceof DataSinkInvocation) {
updatedStaticProperties = dataSinkStorage
.getFirstDataSinkByAppId(pipelineElement.getAppId())
.getStaticProperties();
}
pipelineElement.setStaticProperties(updatedStaticProperties);
} | 3.68 |
hbase_HFile_getStoreFiles | /**
* Returns all HFiles belonging to the given region directory. Could return an empty list.
* @param fs The file system reference.
* @param regionDir The region directory to scan.
* @return The list of files found.
* @throws IOException When scanning the files fails.
*/
public static List<Path> getStoreFiles(FileSystem fs, Path regionDir) throws IOException {
List<Path> regionHFiles = new ArrayList<>();
PathFilter dirFilter = new FSUtils.DirFilter(fs);
FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
for (FileStatus dir : familyDirs) {
FileStatus[] files = fs.listStatus(dir.getPath());
for (FileStatus file : files) {
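// Keep only plain files, skipping anything under the old WAL archive dir or recovered.edits.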
if (
!file.isDirectory()
&& (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME))
&& (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))
) {
regionHFiles.add(file.getPath());
}
}
}
return regionHFiles;
} | 3.68 |
hbase_RSGroupInfo_addAllServers | /** Adds the given servers to the group. */
public void addAllServers(Collection<Address> hostPort) {
servers.addAll(hostPort);
} | 3.68 |
hadoop_NMClient_getLocalizationStatuses | /**
* Get the localization statuses of a container.
*
* @param containerId the Id of the container
* @param nodeId node Id of the container
*
 * @return the localization statuses of the container.
*
* @throws YarnException YarnException.
* @throws IOException IOException.
*/
@InterfaceStability.Unstable
public List<LocalizationStatus> getLocalizationStatuses(
ContainerId containerId, NodeId nodeId) throws YarnException,
IOException {
return null;
} | 3.68 |
framework_LogSection_resetTimer | /**
* Resets the timer and inserts a log row indicating this.
*/
private void resetTimer() {
int sinceStart = VDebugWindow.getMillisSinceStart();
int sinceReset = VDebugWindow.resetTimer();
Element row = DOM.createDiv();
row.addClassName(VDebugWindow.STYLENAME + "-reset");
row.setInnerHTML(Icon.RESET_TIMER + " Timer reset");
row.setTitle(VDebugWindow.getTimingTooltip(sinceStart, sinceReset));
contentElement.appendChild(row);
maybeScroll();
} | 3.68 |
framework_InterruptUpload_receiveUpload | /**
 * Returns an OutputStream that simply counts line ends.
*/
@Override
public OutputStream receiveUpload(final String filename,
final String MIMEType) {
counter = 0;
total = 0;
return new OutputStream() {
private static final int searchedByte = '\n';
@Override
public void write(final int b) {
total++;
if (b == searchedByte) {
counter++;
}
if (sleep && total % 1000 == 0) {
try {
Thread.sleep(100);
} catch (final InterruptedException e) {
e.printStackTrace();
}
}
}
};
} | 3.68 |
hadoop_NamenodeStatusReport_getServiceAddress | /**
* Get the Service RPC address.
*
* @return The Service RPC address.
*/
public String getServiceAddress() {
return this.serviceAddress;
} | 3.68 |
graphhopper_GraphHopper_setGraphHopperLocation | /**
* Sets the graphhopper folder.
*/
public GraphHopper setGraphHopperLocation(String ghLocation) {
ensureNotLoaded();
if (ghLocation == null)
throw new IllegalArgumentException("graphhopper location cannot be null");
this.ghLocation = ghLocation;
return this;
} | 3.68 |
flink_PlanNode_setDriverStrategy | /**
* Sets the driver strategy for this node. Usually should not be changed.
*
* @param newDriverStrategy The driver strategy.
*/
public void setDriverStrategy(DriverStrategy newDriverStrategy) {
this.driverStrategy = newDriverStrategy;
} | 3.68 |
hbase_RestoreTool_generateBoundaryKeys | /**
 * Calculate region boundary keys from the given region directories.
 * @param regionDirList region dir list
 * @return the calculated boundary keys
*/
byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList) throws IOException {
TreeMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
// Build a set of keys to store the boundaries
// calculate region boundaries and add all the column families to the table descriptor
for (Path regionDir : regionDirList) {
LOG.debug("Parsing region dir: " + regionDir);
Path hfofDir = regionDir;
if (!fs.exists(hfofDir)) {
LOG.warn("HFileOutputFormat dir " + hfofDir + " not found");
}
FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
if (familyDirStatuses == null) {
throw new IOException("No families found in " + hfofDir);
}
for (FileStatus stat : familyDirStatuses) {
if (!stat.isDirectory()) {
LOG.warn("Skipping non-directory " + stat.getPath());
continue;
}
boolean isIgnore = false;
String pathName = stat.getPath().getName();
for (String ignore : ignoreDirs) {
if (pathName.contains(ignore)) {
LOG.warn("Skipping non-family directory" + pathName);
isIgnore = true;
break;
}
}
if (isIgnore) {
continue;
}
Path familyDir = stat.getPath();
LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]");
// Skip _logs, etc
if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) {
continue;
}
// start to parse hfile inside one family dir
Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
for (Path hfile : hfiles) {
if (
hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
|| StoreFileInfo.isReference(hfile.getName())
|| HFileLink.isHFileLink(hfile.getName())
) {
continue;
}
HFile.Reader reader = HFile.createReader(fs, hfile, conf);
final byte[] first, last;
try {
first = reader.getFirstRowKey().get();
last = reader.getLastRowKey().get();
LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
+ Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
// To eventually infer start key-end key boundaries
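// First row keys are counted +1 and last row keys -1 so that inferBoundaries below can
// derive the region boundaries from the resulting counts.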
Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
map.put(first, value + 1);
value = map.containsKey(last) ? (Integer) map.get(last) : 0;
map.put(last, value - 1);
} finally {
reader.close();
}
}
}
}
return BulkLoadHFilesTool.inferBoundaries(map);
} | 3.68 |
framework_VTabsheet_cancelLastBlurSchedule | /**
 * Removes the last deferred blur command from execution.
*/
public void cancelLastBlurSchedule() {
if (blurCommand != null) {
blurCommand.stopSchedule();
blurCommand = null;
}
// We really want to make sure this flag gets reset any time
// something interacts with the blur manager and there's no blur
// command scheduled (as we just canceled it).
nextBlurScheduleCancelled = false;
} | 3.68 |
hibernate-validator_ConstraintValidatorManagerImpl_getInitializedValidator | /**
* @param validatedValueType the type of the value to be validated. Cannot be {@code null}.
* @param descriptor the constraint descriptor for which to get an initialized constraint validator. Cannot be {@code null}
* @param constraintValidatorFactory constraint factory used to instantiate the constraint validator. Cannot be {@code null}.
* @param initializationContext context used on constraint validator initialization
* @param <A> the annotation type
*
* @return an initialized constraint validator for the given type and annotation of the value to be validated.
* {@code null} is returned if no matching constraint validator could be found.
*/
@Override
public <A extends Annotation> ConstraintValidator<A, ?> getInitializedValidator(
Type validatedValueType,
ConstraintDescriptorImpl<A> descriptor,
ConstraintValidatorFactory constraintValidatorFactory,
HibernateConstraintValidatorInitializationContext initializationContext) {
Contracts.assertNotNull( validatedValueType );
Contracts.assertNotNull( descriptor );
Contracts.assertNotNull( constraintValidatorFactory );
Contracts.assertNotNull( initializationContext );
CacheKey key = new CacheKey( descriptor.getAnnotationDescriptor(), validatedValueType, constraintValidatorFactory, initializationContext );
@SuppressWarnings("unchecked")
ConstraintValidator<A, ?> constraintValidator = (ConstraintValidator<A, ?>) constraintValidatorCache.get( key );
if ( constraintValidator == null ) {
constraintValidator = createAndInitializeValidator( validatedValueType, descriptor, constraintValidatorFactory, initializationContext );
constraintValidator = cacheValidator( key, constraintValidator );
}
else {
LOG.tracef( "Constraint validator %s found in cache.", constraintValidator );
}
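// Note (added): DUMMY_CONSTRAINT_VALIDATOR serves as a cached sentinel for "no suitable
// validator found" and is translated back to null for callers here.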
return DUMMY_CONSTRAINT_VALIDATOR == constraintValidator ? null : constraintValidator;
} | 3.68 |
hibernate-validator_InheritedMethodsHelper_getAllMethods | /**
* Get a list of all methods which the given class declares, implements,
* overrides or inherits. Methods are added by adding first all methods of
* the class itself and its implemented interfaces, then the super class and
* its interfaces, etc.
*
* @param clazz the class for which to retrieve the methods
*
 * @return list of all methods of the given class
*/
public static List<Method> getAllMethods(Class<?> clazz) {
Contracts.assertNotNull( clazz );
List<Method> methods = newArrayList();
for ( Class<?> hierarchyClass : ClassHierarchyHelper.getHierarchy( clazz ) ) {
Collections.addAll( methods, run( GetMethods.action( hierarchyClass ) ) );
}
return methods;
} | 3.68 |
framework_VaadinService_setCurrent | /**
* Sets the given Vaadin service as the current service.
*
 * @param service
 *            the service to set as the current service
*/
public static void setCurrent(VaadinService service) {
CurrentInstance.set(VaadinService.class, service);
} | 3.68 |
flink_ConfluentRegistryAvroDeserializationSchema_forGeneric | /**
* Creates {@link ConfluentRegistryAvroDeserializationSchema} that produces {@link
* GenericRecord} using the provided reader schema and looks up the writer schema in the
* Confluent Schema Registry.
*
* @param schema schema of produced records
* @param url URL of schema registry to connect
* @param identityMapCapacity maximum number of cached schema versions
* @param registryConfigs map with additional schema registry configs (for example SSL
* properties)
* @return deserialized record in form of {@link GenericRecord}
*/
public static ConfluentRegistryAvroDeserializationSchema<GenericRecord> forGeneric(
Schema schema,
String url,
int identityMapCapacity,
@Nullable Map<String, ?> registryConfigs) {
return new ConfluentRegistryAvroDeserializationSchema<>(
GenericRecord.class,
schema,
new CachedSchemaCoderProvider(null, url, identityMapCapacity, registryConfigs));
} | 3.68 |
framework_DesignContext_getPackage | /**
 * Gets the package corresponding to the given prefix, or <code>null</code> if
 * no package has been registered for the prefix.
*
* @since 7.5.0
* @see #addPackagePrefix(String, String)
* @param prefix
* the prefix to find a package for
 * @return the package, or <code>null</code> if no package is
* registered for the provided prefix
*/
public String getPackage(String prefix) {
return prefixToPackage.get(prefix);
} | 3.68 |
hbase_SyncFutureCache_offer | /**
* Offers the sync future back to the cache for reuse.
*/
public void offer(SyncFuture syncFuture) {
// It is ok to overwrite an existing mapping.
syncFutureCache.asMap().put(syncFuture.getThread(), syncFuture);
} | 3.68 |
flink_DriverUtils_checkArgument | /**
* Checks the given boolean condition, and throws an {@code IllegalArgumentException} if the
* condition is not met (evaluates to {@code false}). The exception will have the given error
* message.
*
* @param condition The condition to check
* @param errorMessage The message for the {@code IllegalArgumentException} that is thrown if
* the check fails.
* @throws IllegalArgumentException Thrown, if the condition is violated.
*/
public static void checkArgument(boolean condition, @Nullable Object errorMessage) {
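// Illustrative usage (added): checkArgument(parallelism > 0, "parallelism must be positive");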
if (!condition) {
throw new IllegalArgumentException(String.valueOf(errorMessage));
}
} | 3.68 |
morf_ChangelogBuilder_withPreferredSQLDialect | /**
* Set which SQL dialect to use for data change statements that cannot be
 * converted to a human-readable form. The default is ORACLE.
*
* @param preferredSQLDialect The SQL dialect to use
* @return This builder for chaining
*/
public ChangelogBuilder withPreferredSQLDialect(String preferredSQLDialect) {
this.preferredSQLDialect = preferredSQLDialect;
return this;
} | 3.68 |
hbase_EntityLock_shutdown | /** Shuts down the thread cleanly and quietly, and returns it. */
Thread shutdown() {
shutdown = true;
interrupt();
return this;
} | 3.68 |
framework_TableQuery_addRowIdChangeListener | /**
* Adds RowIdChangeListener to this query.
*/
@Override
public void addRowIdChangeListener(RowIdChangeListener listener) {
if (rowIdChangeListeners == null) {
rowIdChangeListeners = new LinkedList<RowIdChangeListener>();
}
rowIdChangeListeners.add(listener);
} | 3.68 |
hadoop_ResourceRequestSet_cleanupZeroNonAnyRR | /**
 * Remove all zero-container, non-Any ResourceRequests from the set. This is
 * necessary cleanup to avoid the request set getting too big.
*/
public void cleanupZeroNonAnyRR() {
Iterator<Entry<String, ResourceRequest>> iter =
this.asks.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, ResourceRequest> entry = iter.next();
if (entry.getKey().equals(ResourceRequest.ANY)) {
// Do not delete ANY RR
continue;
}
if (entry.getValue().getNumContainers() == 0) {
iter.remove();
}
}
} | 3.68 |
flink_TableChange_modify | /**
* A table change to modify a watermark.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY WATERMARK FOR <row_time> AS <row_time_expression>
* </pre>
*
* @param newWatermarkSpec the modified watermark definition.
* @return a TableChange represents the modification.
*/
static ModifyWatermark modify(WatermarkSpec newWatermarkSpec) {
return new ModifyWatermark(newWatermarkSpec);
} | 3.68 |
framework_AbstractComponentConnector_flush | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ComponentConnector#flush()
*/
@Override
public void flush() {
// No generic implementation. Override if needed
} | 3.68 |
graphhopper_LandmarkStorage_getLandmarksAsGeoJSON | /**
 * @return the calculated landmarks as a GeoJSON string.
*/
String getLandmarksAsGeoJSON() {
String str = "";
for (int subnetwork = 1; subnetwork < landmarkIDs.size(); subnetwork++) {
int[] lmArray = landmarkIDs.get(subnetwork);
for (int lmIdx = 0; lmIdx < lmArray.length; lmIdx++) {
int index = lmArray[lmIdx];
if (!str.isEmpty())
str += ",";
str += "{ \"type\": \"Feature\", \"geometry\": {\"type\": \"Point\", \"coordinates\": ["
+ na.getLon(index) + ", " + na.getLat(index) + "]},";
str += " \"properties\":{\"node_index\":" + index + ","
+ "\"subnetwork\":" + subnetwork + ","
+ "\"lm_index\":" + lmIdx + "}"
+ "}";
}
}
return "{ \"type\": \"FeatureCollection\", \"features\": [" + str + "]}";
} | 3.68 |
AreaShop_RegionFeature_getRegion | /**
* Get the region of this feature.
* @return region of this feature, or null if generic
*/
public GeneralRegion getRegion() {
return region;
} | 3.68 |
zxing_MaxiCodeReader_extractPureBits | /**
* This method detects a code in a "pure" image -- that is, pure monochrome image
 * which contains only an unrotated, unskewed image of a code, with some white border
* around it. This is a specialized method that works exceptionally fast in this special
* case.
*/
private static BitMatrix extractPureBits(BitMatrix image) throws NotFoundException {
int[] enclosingRectangle = image.getEnclosingRectangle();
if (enclosingRectangle == null) {
throw NotFoundException.getNotFoundInstance();
}
int left = enclosingRectangle[0];
int top = enclosingRectangle[1];
int width = enclosingRectangle[2];
int height = enclosingRectangle[3];
// Now just read off the bits
BitMatrix bits = new BitMatrix(MATRIX_WIDTH, MATRIX_HEIGHT);
for (int y = 0; y < MATRIX_HEIGHT; y++) {
int iy = top + Math.min((y * height + height / 2) / MATRIX_HEIGHT, height - 1);
for (int x = 0; x < MATRIX_WIDTH; x++) {
// srowen: I don't quite understand why the formula below is necessary, but it
// can walk off the image if left + width = the right boundary. So cap it.
int ix = left + Math.min(
(x * width + width / 2 + (y & 0x01) * width / 2) / MATRIX_WIDTH,
width - 1);
if (image.get(ix, iy)) {
bits.set(x, y);
}
}
}
return bits;
} | 3.68 |
morf_ArchiveDataSetWriter_open | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider#open()
*/
@Override
public void open() {
if (zipOutput != null) {
throw new IllegalStateException("Archive data set instance for [" + file + "] already open");
}
try {
zipOutput = new AdaptedZipOutputStream(new FileOutputStream(file));
// Put the read me entry in
ZipEntry entry = new ZipEntry("_ReadMe.txt");
zipOutput.putNextEntry(entry);
ByteStreams.copy(new ByteArrayInputStream(READ_ME.getBytes("UTF-8")), zipOutput);
} catch (Exception e) {
throw new RuntimeException("Error opening zip archive [" + file + "]", e);
}
} | 3.68 |
flink_LongHybridHashTable_get | /**
 * This method is only used by operator fusion codegen to get the build row from the hash table.
 * If the build partition has spilled to disk, this returns null directly, which requires the join
 * operator to also spill the probe row to disk.
*/
public final @Nullable RowIterator<BinaryRowData> get(long probeKey) throws IOException {
if (denseMode) {
if (probeKey >= minKey && probeKey <= maxKey) {
long denseBucket = probeKey - minKey;
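// Each dense bucket stores an 8-byte address, so shifting left by 3 gives the byte offset,
// which is then split into a segment index and an offset within that segment.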
long denseBucketOffset = denseBucket << 3;
int denseSegIndex = (int) (denseBucketOffset >>> segmentSizeBits);
int denseSegOffset = (int) (denseBucketOffset & segmentSizeMask);
long address = denseBuckets[denseSegIndex].getLong(denseSegOffset);
this.matchIterator = densePartition.valueIter(address);
} else {
this.matchIterator = densePartition.valueIter(INVALID_ADDRESS);
}
return matchIterator;
} else {
final int hash = hashLong(probeKey, this.currentRecursionDepth);
currentProbePartition =
this.partitionsBeingBuilt.get(hash % partitionsBeingBuilt.size());
if (currentProbePartition.isInMemory()) {
this.matchIterator = currentProbePartition.get(probeKey, hash);
return matchIterator;
} else {
// If the build partition has spilled to disk, return null directly which requires
// the join operator also spill probe row to disk.
return null;
}
}
} | 3.68 |
hudi_SchemaChangeUtils_isTypeUpdateAllow | /**
* Whether to allow the column type to be updated.
 * Currently only the following conversions are supported:
* int => long/float/double/String/Decimal
* long => float/double/String/Decimal
* float => double/String/Decimal
* double => String/Decimal
* Decimal => Decimal/String
* String => date/decimal
* date => String
* TODO: support more type update.
*
* @param src origin column type.
* @param dsr new column type.
* @return whether to allow the column type to be updated.
*/
public static boolean isTypeUpdateAllow(Type src, Type dsr) {
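// Illustrative note (added): per the list above, widening int to long is allowed, while a
// narrowing change such as long to int is not.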
if (src.isNestedType() || dsr.isNestedType()) {
throw new IllegalArgumentException("only support update primitive type");
}
if (src.equals(dsr)) {
return true;
}
return isTypeUpdateAllowInternal(src, dsr);
} | 3.68 |
flink_SlotStatus_getAllocationID | /**
* Get the allocation id of this slot.
*
* @return The allocation id if this slot is allocated, otherwise null
*/
public AllocationID getAllocationID() {
return allocationID;
} | 3.68 |
flink_JoinedStreams_equalTo | /**
* Specifies a {@link KeySelector} for elements from the second input with explicit type
* information for the key type.
*
* @param keySelector The KeySelector to be used for extracting the second input's key for
* partitioning.
* @param keyType The type information describing the key type.
*/
public EqualTo equalTo(KeySelector<T2, KEY> keySelector, TypeInformation<KEY> keyType) {
requireNonNull(keySelector);
requireNonNull(keyType);
if (!keyType.equals(this.keyType)) {
throw new IllegalArgumentException(
"The keys for the two inputs are not equal: "
+ "first key = "
+ this.keyType
+ " , second key = "
+ keyType);
}
return new EqualTo(input2.clean(keySelector));
} | 3.68 |
hudi_TimestampBasedAvroKeyGenerator_getPartitionPath | /**
* Parse and fetch partition path based on data type.
*
* @param partitionVal partition path object value fetched from record/row
* @return the parsed partition path based on data type
*/
public String getPartitionPath(Object partitionVal) {
initIfNeeded();
long timeMs;
if (partitionVal instanceof Double) {
timeMs = convertLongTimeToMillis(((Double) partitionVal).longValue());
} else if (partitionVal instanceof Float) {
timeMs = convertLongTimeToMillis(((Float) partitionVal).longValue());
} else if (partitionVal instanceof Long) {
timeMs = convertLongTimeToMillis((Long) partitionVal);
} else if (partitionVal instanceof Timestamp && isConsistentLogicalTimestampEnabled()) {
timeMs = ((Timestamp) partitionVal).getTime();
} else if (partitionVal instanceof Integer) {
timeMs = convertLongTimeToMillis(((Integer) partitionVal).longValue());
} else if (partitionVal instanceof BigDecimal) {
timeMs = convertLongTimeToMillis(((BigDecimal) partitionVal).longValue());
} else if (partitionVal instanceof LocalDate) {
// Avro uses LocalDate to represent the Date value internal.
timeMs = convertLongTimeToMillis(((LocalDate) partitionVal).toEpochDay());
} else if (partitionVal instanceof CharSequence) {
if (!inputFormatter.isPresent()) {
throw new HoodieException("Missing input formatter. Ensure "
+ TIMESTAMP_INPUT_DATE_FORMAT.key()
+ " config is set when timestampType is DATE_STRING or MIXED!");
}
DateTime parsedDateTime = inputFormatter.get().parseDateTime(partitionVal.toString());
if (this.outputDateTimeZone == null) {
// Use the timezone that came off the date that was passed in, if it had one
partitionFormatter = partitionFormatter.withZone(parsedDateTime.getZone());
}
timeMs = inputFormatter.get().parseDateTime(partitionVal.toString()).getMillis();
} else {
throw new HoodieNotSupportedException(
"Unexpected type for partition field: " + partitionVal.getClass().getName());
}
DateTime timestamp = new DateTime(timeMs, outputDateTimeZone);
String partitionPath = timestamp.toString(partitionFormatter);
if (encodePartitionPath) {
partitionPath = PartitionPathEncodeUtils.escapePathName(partitionPath);
}
return hiveStylePartitioning ? getPartitionPathFields().get(0) + "=" + partitionPath : partitionPath;
} | 3.68 |
framework_MenuBar_scrollSelectionIntoView | /**
* Scroll the selected item into view.
*
* @since 7.2.6
*/
public void scrollSelectionIntoView() {
scrollItemIntoView(selectedItem);
} | 3.68 |
framework_Form_addValidator | /**
* Adding validators directly to form is not supported.
*
* Add the validators to form fields instead.
*/
@Override
public void addValidator(Validator validator) {
throw new UnsupportedOperationException();
} | 3.68 |
dubbo_AccessLogFilter_destroy | // For test purposes only
public void destroy() {
future.cancel(true);
} | 3.68 |
querydsl_NumberExpression_subtract | /**
* Create a {@code this - right} expression
*
* <p>Get the difference of this and right</p>
*
 * @param right the value to subtract
* @return this - right
*/
public <N extends Number & Comparable<?>> NumberExpression<T> subtract(N right) {
return Expressions.numberOperation(getType(), Ops.SUB, mixin, ConstantImpl.create(right));
} | 3.68 |
flink_CollectIteratorAssert_matchThenNext | /**
 * Check whether any currently pointed-to record is identical to the record from the stream, and
 * move the pointer to the next record if matched.
*
* @param record Record from stream
*/
private boolean matchThenNext(T record) {
for (RecordsFromSplit<T> recordsFromSplit : recordsFromSplits) {
if (!recordsFromSplit.hasNext()) {
continue;
}
if (record.equals(recordsFromSplit.current())) {
recordsFromSplit.forward();
return true;
}
}
return false;
} | 3.68 |
AreaShop_SignsFeature_getSigns | /**
* Get the signs of this region.
* @return List of signs
*/
public List<RegionSign> getSigns() {
return Collections.unmodifiableList(new ArrayList<>(signs.values()));
} | 3.68 |
flink_CompensatedSum_value | /** The value of the sum. */
public double value() {
return value;
} | 3.68 |
AreaShop_GeneralRegion_setOwner | /**
* Change the owner of the region.
* @param player The player that should be the owner
*/
public void setOwner(UUID player) {
if(this instanceof RentRegion) {
((RentRegion)this).setRenter(player);
} else {
((BuyRegion)this).setBuyer(player);
}
} | 3.68 |
framework_VaadinPortletSession_generateActionURL | /**
* Creates a new action URL.
*
* Creating an action URL is only supported when processing a suitable
* request (render or resource request, including normal Vaadin UIDL
* processing) and will return null if not processing a suitable request.
*
* @param action
* the action parameter (javax.portlet.action parameter value in
* JSR-286)
* @return action URL or null if called outside a MimeRequest (outside a
* UIDL request or similar)
*/
public PortletURL generateActionURL(String action) {
PortletURL url = null;
PortletResponse response = getCurrentResponse();
if (response instanceof MimeResponse) {
url = ((MimeResponse) response).createActionURL();
url.setParameter("javax.portlet.action", action);
} else {
return null;
}
return url;
} | 3.68 |
framework_RequiredIndicatorForFieldsWithoutCaption_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 12077;
} | 3.68 |
zxing_ModulusPoly_getDegree | /**
* @return degree of this polynomial
*/
int getDegree() {
return coefficients.length - 1;
} | 3.68 |
zxing_Detector_expandSquare | /**
* Expand the square represented by the corner points by pushing out equally in all directions
*
* @param cornerPoints the corners of the square, which has the bull's eye at its center
* @param oldSide the original length of the side of the square in the target bit matrix
* @param newSide the new length of the size of the square in the target bit matrix
* @return the corners of the expanded square
*/
private static ResultPoint[] expandSquare(ResultPoint[] cornerPoints, int oldSide, int newSide) {
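// Each corner pair below spans a full diagonal; scaling that vector by newSide / (2 * oldSide)
// and adding/subtracting it at the center scales the half-diagonal by newSide / oldSide,
// producing a concentric square of side newSide.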
float ratio = newSide / (2.0f * oldSide);
float dx = cornerPoints[0].getX() - cornerPoints[2].getX();
float dy = cornerPoints[0].getY() - cornerPoints[2].getY();
float centerx = (cornerPoints[0].getX() + cornerPoints[2].getX()) / 2.0f;
float centery = (cornerPoints[0].getY() + cornerPoints[2].getY()) / 2.0f;
ResultPoint result0 = new ResultPoint(centerx + ratio * dx, centery + ratio * dy);
ResultPoint result2 = new ResultPoint(centerx - ratio * dx, centery - ratio * dy);
dx = cornerPoints[1].getX() - cornerPoints[3].getX();
dy = cornerPoints[1].getY() - cornerPoints[3].getY();
centerx = (cornerPoints[1].getX() + cornerPoints[3].getX()) / 2.0f;
centery = (cornerPoints[1].getY() + cornerPoints[3].getY()) / 2.0f;
ResultPoint result1 = new ResultPoint(centerx + ratio * dx, centery + ratio * dy);
ResultPoint result3 = new ResultPoint(centerx - ratio * dx, centery - ratio * dy);
return new ResultPoint[]{result0, result1, result2, result3};
} | 3.68 |
hmily_RpcMediator_getInstance | /**
* Gets instance.
*
* @return the instance
*/
public static RpcMediator getInstance() {
return INSTANCE;
} | 3.68 |
pulsar_ClientConfiguration_setUseTls | /**
* Configure whether to use TLS encryption on the connection <i>(default: false)</i>.
*
 * @param useTls whether to use TLS encryption on the connection
*/
public void setUseTls(boolean useTls) {
confData.setUseTls(useTls);
} | 3.68 |
hbase_MetricsSink_applyBatch | /**
 * Convenience method to change metrics when a batch of operations is applied.
* @param batchSize total number of mutations that are applied/replicated
* @param hfileSize total number of hfiles that are applied/replicated
*/
public void applyBatch(long batchSize, long hfileSize) {
applyBatch(batchSize);
mss.incrAppliedHFiles(hfileSize);
} | 3.68 |
flink_AccumulatorHelper_mergeInto | /**
* Merge two collections of accumulators. The second will be merged into the first.
*
* @param target The collection of accumulators that will be updated
* @param toMerge The collection of accumulators that will be merged into the other
*/
public static void mergeInto(
Map<String, OptionalFailure<Accumulator<?, ?>>> target,
Map<String, Accumulator<?, ?>> toMerge) {
for (Map.Entry<String, Accumulator<?, ?>> otherEntry : toMerge.entrySet()) {
OptionalFailure<Accumulator<?, ?>> ownAccumulator = target.get(otherEntry.getKey());
if (ownAccumulator == null) {
// Create initial counter (copy!)
target.put(
otherEntry.getKey(),
wrapUnchecked(otherEntry.getKey(), () -> otherEntry.getValue().clone()));
} else if (ownAccumulator.isFailure()) {
continue;
} else {
Accumulator<?, ?> accumulator = ownAccumulator.getUnchecked();
// Both should have the same type
compareAccumulatorTypes(
otherEntry.getKey(),
accumulator.getClass(),
otherEntry.getValue().getClass());
// Merge target counter with other counter
target.put(
otherEntry.getKey(),
wrapUnchecked(
otherEntry.getKey(),
() -> mergeSingle(accumulator, otherEntry.getValue().clone())));
}
}
} | 3.68 |
hadoop_StoreContextBuilder_setEnableCSE | /**
 * Set the client-side encryption flag.
* @param value value indicating if client side encryption is enabled or not.
* @return builder instance.
*/
public StoreContextBuilder setEnableCSE(
boolean value) {
isCSEEnabled = value;
return this;
} | 3.68 |
dubbo_AbstractConfigManager_getConfigByName | /**
 * Get a config by name if it exists.
 *
 * @param cls the config class to look up
 * @param name the name of the config to find
 * @return the matching config, or null if none is found
*/
protected <C extends AbstractConfig> C getConfigByName(Class<? extends C> cls, String name) {
Map<String, ? extends C> configsMap = getConfigsMap(cls);
if (configsMap.isEmpty()) {
return null;
}
// try to find config by name
if (ReflectUtils.hasMethod(cls, CONFIG_NAME_READ_METHOD)) {
List<C> list = configsMap.values().stream()
.filter(cfg -> name.equals(getConfigName(cfg)))
.collect(Collectors.toList());
if (list.size() > 1) {
throw new IllegalStateException("Found more than one config by name: " + name + ", instances: " + list
+ ". Please remove redundant configs or get config by id.");
} else if (list.size() == 1) {
return list.get(0);
}
}
return null;
} | 3.68 |
hadoop_AzureBlobFileSystem_getRootCause | /**
* Gets the root cause of a provided {@link Throwable}. If there is no cause for the
* {@link Throwable} provided into this function, the original {@link Throwable} is returned.
*
* @param throwable starting {@link Throwable}
* @return root cause {@link Throwable}
*/
private Throwable getRootCause(Throwable throwable) {
if (throwable == null) {
throw new IllegalArgumentException("throwable can not be null");
}
Throwable result = throwable;
while (result.getCause() != null) {
result = result.getCause();
}
return result;
} | 3.68 |
morf_AliasedField_plus | /**
* @param expression value to add to this field.
* @return A new expression using {@link MathsField} and {@link MathsOperator#PLUS}.
*/
public final MathsField plus(AliasedField expression) {
return new MathsField(this, MathsOperator.PLUS, potentiallyBracketExpression(expression));
} | 3.68 |
hbase_NettyFutureUtils_safeWriteAndFlush | /**
* Call writeAndFlush on the channel and eat the returned future by logging the error when the
* future is completed with error.
*/
public static void safeWriteAndFlush(ChannelOutboundInvoker channel, Object msg) {
consume(channel.writeAndFlush(msg));
} | 3.68 |
hadoop_Hash_getHashType | /**
* This utility method converts the name of the configured
* hash type to a symbolic constant.
* @param conf configuration
* @return one of the predefined constants
*/
public static int getHashType(Configuration conf) {
String name = conf.get(HADOOP_UTIL_HASH_TYPE_KEY,
HADOOP_UTIL_HASH_TYPE_DEFAULT);
return parseHashType(name);
} | 3.68 |
hadoop_FederationStateStoreFacade_getApplicationSubmissionContext | /**
* Get ApplicationSubmissionContext according to ApplicationId.
* We don't throw exceptions. If the application cannot be found, we return null.
*
* @param appId ApplicationId
* @return ApplicationSubmissionContext of ApplicationId
*/
public ApplicationSubmissionContext getApplicationSubmissionContext(ApplicationId appId) {
try {
GetApplicationHomeSubClusterResponse response = stateStore.getApplicationHomeSubCluster(
GetApplicationHomeSubClusterRequest.newInstance(appId));
ApplicationHomeSubCluster appHomeSubCluster = response.getApplicationHomeSubCluster();
return appHomeSubCluster.getApplicationSubmissionContext();
} catch (Exception e) {
LOG.error("getApplicationSubmissionContext error, applicationId = {}.", appId, e);
return null;
}
} | 3.68 |
framework_AbstractComponent_setSizeFull | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Sizeable#setSizeFull()
*/
@Override
public void setSizeFull() {
setWidth(100, Unit.PERCENTAGE);
setHeight(100, Unit.PERCENTAGE);
} | 3.68 |
streampipes_AdapterDescription_setCorrespondingServiceGroup | /**
* @deprecated check if the service group can be removed as a single pipeline element
* can correspond to different service groups
*/
@Deprecated
public void setCorrespondingServiceGroup(String correspondingServiceGroup) {
this.correspondingServiceGroup = correspondingServiceGroup;
} | 3.68 |
hadoop_ListResultEntrySchema_eTag | /**
* Get the etag value.
*
* @return the etag value
*/
public String eTag() {
return eTag;
} | 3.68 |
pulsar_LedgerMetadataUtils_buildMetadataForPlacementPolicyConfig | /**
* Build additional metadata for the placement policy config.
*
* @param className
* the ensemble placement policy classname
* @param properties
* the ensemble placement policy properties
* @return
* the additional metadata
 * @throws EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException
* placement policy configuration encode error
*/
static Map<String, byte[]> buildMetadataForPlacementPolicyConfig(
Class<? extends EnsemblePlacementPolicy> className, Map<String, Object> properties)
throws EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException {
EnsemblePlacementPolicyConfig config = new EnsemblePlacementPolicyConfig(className, properties);
return Map.of(EnsemblePlacementPolicyConfig.ENSEMBLE_PLACEMENT_POLICY_CONFIG, config.encode());
} | 3.68 |
hbase_StorageClusterStatusModel_getReadRequestsCount | /** Returns the current total read requests made to the region */
@XmlAttribute
public long getReadRequestsCount() {
return readRequestsCount;
} | 3.68 |
framework_GridLayout_setCursorX | /**
* Sets the current cursor x-position. This is usually handled automatically
* by GridLayout.
*
* @param cursorX
* current cursor x-position
*/
public void setCursorX(int cursorX) {
this.cursorX = cursorX;
} | 3.68 |
framework_VTree_deselectNode | /**
* Deselects a node
*
* @param node
* The node to deselect
*/
private void deselectNode(TreeNode node) {
node.setSelected(false);
selectedIds.remove(node.key);
selectionHasChanged = true;
} | 3.68 |
hadoop_LeveldbIterator_peekPrev | /**
* @return the previous element in the iteration, without rewinding the
* iteration.
*/
public Map.Entry<byte[], byte[]> peekPrev() throws DBException {
try {
return iter.peekPrev();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.68 |
morf_SpreadsheetDataSetConsumer_spreadsheetifyName | /**
* Converts camel capped names to something we can show in a spreadsheet.
*
* @param name Name to convert.
 * @return A human-readable version of the name with spaces inserted before camel caps.
*/
private String spreadsheetifyName(String name) {
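// Illustrative example (added): "agreementNumber" becomes "Agreement Number".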
return StringUtils.capitalize(name).replaceAll("([A-Z])", " $1").trim();
} | 3.68 |
hbase_SingleColumnValueExcludeFilter_parseFrom | /**
* Parse a serialized representation of {@link SingleColumnValueExcludeFilter}
* @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
* @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.SingleColumnValueExcludeFilter proto;
try {
proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter();
final CompareOperator compareOp = CompareOperator.valueOf(parentProto.getCompareOp().name());
final ByteArrayComparable comparator;
try {
comparator = ProtobufUtil.toComparator(parentProto.getComparator());
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new SingleColumnValueExcludeFilter(
parentProto.hasColumnFamily() ? parentProto.getColumnFamily().toByteArray() : null,
parentProto.hasColumnQualifier() ? parentProto.getColumnQualifier().toByteArray() : null,
compareOp, comparator, parentProto.getFilterIfMissing(), parentProto.getLatestVersionOnly());
} | 3.68 |
hbase_Encryption_decryptWithSubjectKey | /**
* Decrypts a block of ciphertext with the symmetric key resolved for the given subject
* @param out plaintext
* @param in ciphertext
* @param outLen the expected plaintext length
* @param subject the subject's key alias
* @param conf configuration
* @param cipher the encryption algorithm
* @param iv the initialization vector, can be null
*/
public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen,
String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {
Key key = getSecretKeyForSubject(subject, conf);
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
Decryptor d = cipher.getDecryptor();
d.setKey(key);
d.setIv(iv); // can be null
try {
decrypt(out, in, outLen, d);
} catch (IOException e) {
// If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one
// is configured
String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY);
if (alternateAlgorithm != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Unable to decrypt data with current cipher algorithm '"
+ conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm
+ "' configured.");
}
Cipher alterCipher = Encryption.getCipher(conf, alternateAlgorithm);
if (alterCipher == null) {
throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available");
}
d = alterCipher.getDecryptor();
d.setKey(key);
d.setIv(iv); // can be null
decrypt(out, in, outLen, d);
} else {
throw new IOException(e);
}
}
} | 3.68 |
flink_SqlFunctionUtils_rpad | /**
* Returns the string str right-padded with the string pad to a length of len characters. If str
* is longer than len, the return value is shortened to len characters.
*/
public static String rpad(String base, int len, String pad) {
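// Illustrative examples (added): rpad("hi", 5, "ab") returns "hiaba"; rpad("hello", 3, "x")
// returns "hel"; a negative len or an empty pad returns null.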
if (len < 0 || "".equals(pad)) {
return null;
} else if (len == 0) {
return "";
}
char[] data = new char[len];
char[] baseChars = base.toCharArray();
char[] padChars = pad.toCharArray();
int pos = 0;
// copy the base
while (pos < base.length() && pos < len) {
data[pos] = baseChars[pos];
pos += 1;
}
// copy the padding
while (pos < len) {
int i = 0;
while (i < pad.length() && i < len - pos) {
data[pos + i] = padChars[i];
i += 1;
}
pos += pad.length();
}
return new String(data);
} | 3.68 |
hudi_HoodieIndexID_isPartition | /**
 * Is this ID a Partition type?
 *
 * @return True if this ID is of PARTITION type
*/
public final boolean isPartition() {
return (getType() == Type.PARTITION);
} | 3.68 |
dubbo_RegistryDirectory_destroyAllInvokers | /**
* Close all invokers
*/
@Override
protected void destroyAllInvokers() {
Map<URL, Invoker<T>> localUrlInvokerMap = this.urlInvokerMap; // local reference
if (!CollectionUtils.isEmptyMap(localUrlInvokerMap)) {
for (Invoker<T> invoker : new ArrayList<>(localUrlInvokerMap.values())) {
try {
invoker.destroy();
} catch (Throwable t) {
// 1-15 - Failed to destroy service
logger.warn(
REGISTRY_FAILED_DESTROY_SERVICE,
"",
"",
"Failed to destroy service " + serviceKey + " to provider " + invoker.getUrl(),
t);
}
}
localUrlInvokerMap.clear();
}
this.urlInvokerMap = null;
this.cachedInvokerUrls = null;
destroyInvokers();
} | 3.68 |
hbase_MetricsSnapshot_addSnapshotClone | /**
* Record a single instance of a snapshot cloned table
* @param time time that the snapshot clone took
*/
public void addSnapshotClone(long time) {
source.updateSnapshotCloneTime(time);
} | 3.68 |
morf_SqlServer_matchesProduct | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#matchesProduct(java.lang.String)
*/
@Override
public boolean matchesProduct(String product) {
return product.toLowerCase().contains("microsoft sql server");
} | 3.68 |
hbase_CompressionUtil_compressionOverhead | /**
* Most compression algorithms can be presented with pathological input that causes an expansion
* rather than a compression. Hadoop's compression API requires that we calculate additional
* buffer space required for the worst case. There is a formula developed for gzip that applies as
* a ballpark to all LZ variants. It should be good enough for now and has been tested as such
* with a range of different inputs.
*/
public static int compressionOverhead(int bufferSize) {
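// Worked example (added): for a 64 KB buffer (65536 bytes) this returns
// 65536 / 6 + 32 = 10954 bytes of extra headroom.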
// Given an input buffer of 'buffersize' bytes we presume a worst case expansion of
// 32 bytes (block header) and addition 1/6th of the input size.
return (bufferSize / 6) + 32;
} | 3.68 |
morf_SelectStatementBuilder_groupBy | /**
* Specifies that the records should be grouped by the specified fields.
*
* <blockquote><pre>
* select()
* .from(tableRef("Foo"))
* .groupBy(field("age"));</pre></blockquote>
*
* @param fields the fields to group by
* @return this, for method chaining.
*/
public SelectStatementBuilder groupBy(Iterable<? extends AliasedFieldBuilder> fields) {
// Add the list
groupBys.addAll(Builder.Helper.buildAll(Lists.newArrayList(fields)));
return this;
} | 3.68 |
zxing_WhiteRectangleDetector_detect | /**
* <p>
* Detects a candidate barcode-like rectangular region within an image. It
 * starts around the center of the image and increases the size of the candidate
* region until it finds a white rectangular region.
* </p>
*
* @return {@link ResultPoint}[] describing the corners of the rectangular
* region. The first and last points are opposed on the diagonal, as
* are the second and third. The first point will be the topmost
* point and the last, the bottommost. The second point will be
* leftmost and the third, the rightmost
* @throws NotFoundException if no Data Matrix Code can be found
*/
public ResultPoint[] detect() throws NotFoundException {
int left = leftInit;
int right = rightInit;
int up = upInit;
int down = downInit;
boolean sizeExceeded = false;
boolean aBlackPointFoundOnBorder = true;
boolean atLeastOneBlackPointFoundOnRight = false;
boolean atLeastOneBlackPointFoundOnBottom = false;
boolean atLeastOneBlackPointFoundOnLeft = false;
boolean atLeastOneBlackPointFoundOnTop = false;
while (aBlackPointFoundOnBorder) {
aBlackPointFoundOnBorder = false;
// .....
// . |
// .....
boolean rightBorderNotWhite = true;
while ((rightBorderNotWhite || !atLeastOneBlackPointFoundOnRight) && right < width) {
rightBorderNotWhite = containsBlackPoint(up, down, right, false);
if (rightBorderNotWhite) {
right++;
aBlackPointFoundOnBorder = true;
atLeastOneBlackPointFoundOnRight = true;
} else if (!atLeastOneBlackPointFoundOnRight) {
right++;
}
}
if (right >= width) {
sizeExceeded = true;
break;
}
// .....
// . .
// .___.
boolean bottomBorderNotWhite = true;
while ((bottomBorderNotWhite || !atLeastOneBlackPointFoundOnBottom) && down < height) {
bottomBorderNotWhite = containsBlackPoint(left, right, down, true);
if (bottomBorderNotWhite) {
down++;
aBlackPointFoundOnBorder = true;
atLeastOneBlackPointFoundOnBottom = true;
} else if (!atLeastOneBlackPointFoundOnBottom) {
down++;
}
}
if (down >= height) {
sizeExceeded = true;
break;
}
// .....
// | .
// .....
boolean leftBorderNotWhite = true;
while ((leftBorderNotWhite || !atLeastOneBlackPointFoundOnLeft) && left >= 0) {
leftBorderNotWhite = containsBlackPoint(up, down, left, false);
if (leftBorderNotWhite) {
left--;
aBlackPointFoundOnBorder = true;
atLeastOneBlackPointFoundOnLeft = true;
} else if (!atLeastOneBlackPointFoundOnLeft) {
left--;
}
}
if (left < 0) {
sizeExceeded = true;
break;
}
// .___.
// . .
// .....
boolean topBorderNotWhite = true;
while ((topBorderNotWhite || !atLeastOneBlackPointFoundOnTop) && up >= 0) {
topBorderNotWhite = containsBlackPoint(left, right, up, true);
if (topBorderNotWhite) {
up--;
aBlackPointFoundOnBorder = true;
atLeastOneBlackPointFoundOnTop = true;
} else if (!atLeastOneBlackPointFoundOnTop) {
up--;
}
}
if (up < 0) {
sizeExceeded = true;
break;
}
}
if (!sizeExceeded) {
int maxSize = right - left;
ResultPoint z = null;
for (int i = 1; z == null && i < maxSize; i++) {
z = getBlackPointOnSegment(left, down - i, left + i, down);
}
if (z == null) {
throw NotFoundException.getNotFoundInstance();
}
ResultPoint t = null;
//go down right
for (int i = 1; t == null && i < maxSize; i++) {
t = getBlackPointOnSegment(left, up + i, left + i, up);
}
if (t == null) {
throw NotFoundException.getNotFoundInstance();
}
ResultPoint x = null;
//go down left
for (int i = 1; x == null && i < maxSize; i++) {
x = getBlackPointOnSegment(right, up + i, right - i, up);
}
if (x == null) {
throw NotFoundException.getNotFoundInstance();
}
ResultPoint y = null;
//go up left
for (int i = 1; y == null && i < maxSize; i++) {
y = getBlackPointOnSegment(right, down - i, right - i, down);
}
if (y == null) {
throw NotFoundException.getNotFoundInstance();
}
return centerEdges(y, z, x, t);
} else {
throw NotFoundException.getNotFoundInstance();
}
} | 3.68 |
dubbo_MetricsSupport_fillZero | /**
* Generate a complete indicator item for an interface/method
*/
public static <T> void fillZero(Map<?, Map<T, AtomicLong>> data) {
if (CollectionUtils.isEmptyMap(data)) {
return;
}
Set<T> allKeyMetrics =
data.values().stream().flatMap(map -> map.keySet().stream()).collect(Collectors.toSet());
data.forEach((keyWrapper, mapVal) -> {
for (T key : allKeyMetrics) {
mapVal.computeIfAbsent(key, k -> new AtomicLong(0));
}
});
} | 3.68 |
hadoop_BalanceProcedureScheduler_waitUntilDone | /**
* Wait permanently until the job is done.
*/
public void waitUntilDone(BalanceJob job) {
BalanceJob found = findJob(job);
if (found == null || found.isJobDone()) {
return;
}
while (!found.isJobDone()) {
try {
found.waitJobDone();
} catch (InterruptedException e) {
}
}
} | 3.68 |
flink_CoGroupedStreams_where | /**
* Specifies a {@link KeySelector} for elements from the first input with explicit type
* information.
*
* @param keySelector The KeySelector to be used for extracting the first input's key for
* partitioning.
* @param keyType The type information describing the key type.
*/
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector, TypeInformation<KEY> keyType) {
Preconditions.checkNotNull(keySelector);
Preconditions.checkNotNull(keyType);
return new Where<>(input1.clean(keySelector), keyType);
} | 3.68 |
hadoop_ChangeDetectionPolicy_onChangeDetected | /**
* Takes appropriate action based on {@link #getMode() mode} when a change has
* been detected.
*
* @param revisionId the expected revision id
* @param newRevisionId the detected revision id
* @param uri the URI of the object being accessed
* @param position the position being read in the object
* @param operation the operation being performed on the object (e.g. open or
* re-open) that triggered the change detection
* @param timesAlreadyDetected number of times a change has already been
* detected on the current stream
* @return a pair of: was a change detected, and any exception to throw.
* If the change was detected, this updates a counter in the stream
* statistics; If an exception was returned it is thrown after the counter
* update.
*/
public ImmutablePair<Boolean, RemoteFileChangedException> onChangeDetected(
String revisionId,
String newRevisionId,
String uri,
long position,
String operation,
long timesAlreadyDetected) {
String positionText = position >= 0 ? (" at " + position) : "";
switch (mode) {
case None:
// something changed; we don't care.
return new ImmutablePair<>(false, null);
case Warn:
if (timesAlreadyDetected == 0) {
// only warn on the first detection to avoid a noisy log
LOG.warn(
String.format(
"%s change detected on %s %s%s. Expected %s got %s",
getSource(), operation, uri, positionText, revisionId,
newRevisionId));
return new ImmutablePair<>(true, null);
}
return new ImmutablePair<>(false, null);
case Client:
case Server:
default:
// mode == Client or Server; will trigger on version failures
// of getObjectMetadata even on server.
return new ImmutablePair<>(true,
new RemoteFileChangedException(uri,
operation,
String.format("%s "
+ CHANGE_DETECTED
+ " during %s%s."
+ " Expected %s got %s",
getSource(), operation, positionText, revisionId, newRevisionId)));
}
} | 3.68 |
flink_FsCheckpointStreamFactory_isClosed | /**
* Checks whether the stream is closed.
*
* @return True if the stream was closed, false if it is still open.
*/
public boolean isClosed() {
return closed;
} | 3.68 |
flink_TableChange_modifyColumnComment | /**
* A table change to modify the column comment.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <original_column_type> COMMENT '<new_column_comment>'
* </pre>
*
* @param oldColumn the definition of the old column.
* @param newComment the modified comment.
* @return a TableChange represents the modification.
*/
static ModifyColumnComment modifyColumnComment(Column oldColumn, String newComment) {
return new ModifyColumnComment(oldColumn, newComment);
} | 3.68 |
flink_CatalogTable_of | /**
* Creates an instance of {@link CatalogTable} with a specific snapshot.
*
* @param schema unresolved schema
* @param comment optional comment
* @param partitionKeys list of partition keys or an empty list if not partitioned
* @param options options to configure the connector
* @param snapshot table snapshot of the table
*/
static CatalogTable of(
Schema schema,
@Nullable String comment,
List<String> partitionKeys,
Map<String, String> options,
@Nullable Long snapshot) {
return new DefaultCatalogTable(schema, comment, partitionKeys, options, snapshot);
} | 3.68 |
hadoop_TimelineEntity_addOtherInfo | /**
* Add a map of other information of the entity to the existing other info map
*
* @param otherInfo
* a map of other information
*/
public void addOtherInfo(Map<String, Object> otherInfo) {
this.otherInfo.putAll(otherInfo);
} | 3.68 |
hadoop_FederationStateStoreFacade_setPolicyConfiguration | /**
* Set a policy configuration into the state store.
*
* @param policyConf the policy configuration to set
* @throws YarnException if the request is invalid/fails
*/
public void setPolicyConfiguration(SubClusterPolicyConfiguration policyConf)
throws YarnException {
stateStore.setPolicyConfiguration(
SetSubClusterPolicyConfigurationRequest.newInstance(policyConf));
} | 3.68 |
hudi_HoodieTable_getSavepointTimestamps | /**
* Get the list of savepoint timestamps in this table.
*/
public Set<String> getSavepointTimestamps() {
return getCompletedSavepointTimeline().getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
} | 3.68 |
dubbo_ExpiringMap_isRunning | /**
* get thread state
*
* @return thread state
*/
public boolean isRunning() {
return running;
} | 3.68 |
framework_ComboBox_getFilteredOptions | /**
* Filters the options in memory and returns the full filtered list.
*
* This can be less efficient than using container filters, so use
* {@link #getOptionsWithFilter(boolean)} if possible (filterable container
* and suitable item caption mode etc.).
*
 * @return the filtered list of item ids
*/
protected List<?> getFilteredOptions() {
if (!isFilteringNeeded()) {
prevfilterstring = null;
filteredOptions = new LinkedList<Object>(getItemIds());
return filteredOptions;
}
if (filterstring.equals(prevfilterstring)) {
return filteredOptions;
}
Collection<?> items;
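// If the new filter string extends the previous one, the previously filtered options are a
// superset of the new result, so only they need to be re-checked.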
if (prevfilterstring != null
&& filterstring.startsWith(prevfilterstring)) {
items = filteredOptions;
} else {
items = getItemIds();
}
prevfilterstring = filterstring;
filteredOptions = new LinkedList<Object>();
for (final Object itemId : items) {
String caption = getItemCaption(itemId);
if (caption == null || caption.equals("")) {
continue;
} else {
caption = caption.toLowerCase(getLocale());
}
switch (filteringMode) {
case CONTAINS:
if (caption.indexOf(filterstring) > -1) {
filteredOptions.add(itemId);
}
break;
case STARTSWITH:
default:
if (caption.startsWith(filterstring)) {
filteredOptions.add(itemId);
}
break;
}
}
return filteredOptions;
} | 3.68 |
hadoop_FederationStateStoreFacade_reinitialize | /**
* Delete and re-initialize the cache, to force it to use the given
* configuration.
*
* @param store the {@link FederationStateStore} instance to reinitialize with
* @param config the updated configuration to reinitialize with
*/
@VisibleForTesting
public synchronized void reinitialize(FederationStateStore store,
Configuration config) {
this.conf = config;
this.stateStore = store;
federationCache.clearCache();
federationCache.initCache(config, stateStore);
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertFromSelectWithSourceAndJoinedInDifferentSchema | /**
* Tests that an insert from a select works when the source table and those in the join statement are in a different schema.
*/
@Test
public void testInsertFromSelectWithSourceAndJoinedInDifferentSchema() {
TableReference source = new TableReference("MYSCHEMA", TEST_TABLE);
TableReference sourceJoin = new TableReference("MYSCHEMA", ALTERNATE_TABLE);
SelectStatement sourceStmt = new SelectStatement(new FieldReference("id"),
new FieldReference("version"),
new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(FLOAT_FIELD))
.from(source)
.innerJoin(sourceJoin, source.field(STRING_FIELD).eq(sourceJoin.field(STRING_FIELD)));
InsertStatement stmt = new InsertStatement().into(new TableReference(OTHER_TABLE))
.fields(new FieldReference("id"),
new FieldReference("version"),
new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(FLOAT_FIELD))
.from(sourceStmt);
String expectedSql = "INSERT INTO " + tableName(OTHER_TABLE) + " (id, version, stringField, intField, floatField) SELECT id, version, stringField, intField, floatField FROM " + differentSchemaTableName(TEST_TABLE) + " INNER JOIN " + differentSchemaTableName(ALTERNATE_TABLE) + " ON (Test.stringField = Alternate.stringField)";
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertEquals("Insert with explicit field lists", ImmutableList.of(expectedSql), sql);
} | 3.68 |
AreaShop_TeleportFeature_cannotSpawnOn | /**
* Check if a player can spawn on here.
* @param material Material to check (assumed that this is below the feet)
* @return true when it is safe to spawn on top of, otherwise false
*/
private static boolean cannotSpawnOn(Material material) {
String name = material.name();
return name.equals("CACTUS")
|| name.contains("PISTON")
|| name.contains("SIGN")
|| name.contains("DOOR")
|| name.contains("PLATE")
|| name.contains("REDSTONE_LAMP")
|| name.contains("FENCE")
|| name.contains("GLASS_PANE") || name.contains("THIN_GLASS")
|| name.equals("DRAGON_EGG")
|| name.contains("MAGMA");
} | 3.68 |
hbase_HMaster_waitForMetaOnline | /**
* Check hbase:meta is up and ready for reading. For use during Master startup only.
* @return True if meta is UP and online and startup can progress. Otherwise, meta is not online
* and we will hold here until operator intervention.
*/
@InterfaceAudience.Private
public boolean waitForMetaOnline() {
return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO);
} | 3.68 |
morf_AddIndex_reverse | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
Table original = schema.getTable(tableName);
List<String> indexeNames = new ArrayList<>();
boolean foundAndRemovedIndex = false;
for (Index index : original.indexes()) {
if (index.getName().equalsIgnoreCase(newIndex.getName())) {
foundAndRemovedIndex = true;
} else {
indexeNames.add(index.getName());
}
}
    // If the index was never found, the change cannot be reversed
if (!foundAndRemovedIndex) {
throw new IllegalStateException("Error reversing AddIndex database change. Index [" + newIndex.getName() + "] not found in table [" + tableName + "] so it could not be reversed out");
}
return new TableOverrideSchema(schema, new AlteredTable(original, null, null, indexeNames, null));
} | 3.68 |
hbase_TsvImporterMapper_setup | /**
   * Handles initializing this class with objects specific to it (i.e., the parser). Common
   * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
   * subclass may choose to override this method and call <code>doSetup</code> as well before
   * handling its own custom params.
   */
@Override
protected void setup(Context context) {
doSetup(context);
conf = context.getConfiguration();
parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator);
if (parser.getRowKeyColumnIndex() == -1) {
throw new RuntimeException("No row key column specified");
}
this.kvCreator = new CellCreator(conf);
tags = new ArrayList<>();
} | 3.68 |
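A small sketch of the configuration the parser above expects, using ImportTsv's documented column-spec format in which HBASE_ROW_KEY marks the row-key column; the family and qualifiers are made up, and it assumes ImportTsv.TsvParser is accessible from the calling code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;

public class TsvParserDemo {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // First column becomes the row key, the rest map to family:qualifier.
        conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,d:count,d:name");
        ImportTsv.TsvParser parser =
            new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), "\t");
        System.out.println(parser.getRowKeyColumnIndex()); // 0
    }
}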
hbase_IncrementalTableBackupClient_handleBulkLoad | /**
   * Reads bulk load records from the backup table, iterates through the records and forms the
   * paths for the bulk loaded hfiles, then copies the bulk loaded hfiles to the backup destination.
   * @param sTableList list of tables to be backed up
   * @return map of table to List of files
   */
@SuppressWarnings("unchecked")
protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList)
throws IOException {
Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
List<String> activeFiles = new ArrayList<>();
List<String> archiveFiles = new ArrayList<>();
Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
backupManager.readBulkloadRows(sTableList);
Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst();
FileSystem tgtFs;
try {
tgtFs = FileSystem.get(new URI(backupInfo.getBackupRootDir()), conf);
} catch (URISyntaxException use) {
throw new IOException("Unable to get FileSystem", use);
}
Path rootdir = CommonFSUtils.getRootDir(conf);
Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);
for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry : map
.entrySet()) {
TableName srcTable = tblEntry.getKey();
int srcIdx = getIndex(srcTable, sTableList);
if (srcIdx < 0) {
LOG.warn("Couldn't find " + srcTable + " in source table List");
continue;
}
if (mapForSrc[srcIdx] == null) {
mapForSrc[srcIdx] = new TreeMap<>(Bytes.BYTES_COMPARATOR);
}
Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
srcTable.getQualifierAsString());
for (Map.Entry<String, Map<String, List<Pair<String, Boolean>>>> regionEntry : tblEntry
.getValue().entrySet()) {
String regionName = regionEntry.getKey();
Path regionDir = new Path(tblDir, regionName);
// map from family to List of hfiles
for (Map.Entry<String, List<Pair<String, Boolean>>> famEntry : regionEntry.getValue()
.entrySet()) {
String fam = famEntry.getKey();
Path famDir = new Path(regionDir, fam);
List<Path> files;
if (!mapForSrc[srcIdx].containsKey(Bytes.toBytes(fam))) {
files = new ArrayList<>();
mapForSrc[srcIdx].put(Bytes.toBytes(fam), files);
} else {
files = mapForSrc[srcIdx].get(Bytes.toBytes(fam));
}
Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
String tblName = srcTable.getQualifierAsString();
Path tgtFam = new Path(new Path(tgtTable, regionName), fam);
if (!tgtFs.mkdirs(tgtFam)) {
throw new IOException("couldn't create " + tgtFam);
}
for (Pair<String, Boolean> fileWithState : famEntry.getValue()) {
String file = fileWithState.getFirst();
int idx = file.lastIndexOf("/");
String filename = file;
if (idx > 0) {
filename = file.substring(idx + 1);
}
Path p = new Path(famDir, filename);
Path tgt = new Path(tgtFam, filename);
Path archive = new Path(archiveDir, filename);
if (fs.exists(p)) {
            if (LOG.isTraceEnabled()) {
              LOG.trace("found bulk hfile " + file + " in " + famDir + " for " + tblName);
              LOG.trace("copying " + p + " to " + tgt);
            }
activeFiles.add(p.toString());
} else if (fs.exists(archive)) {
LOG.debug("copying archive " + archive + " to " + tgt);
archiveFiles.add(archive.toString());
}
files.add(tgt);
}
}
}
}
copyBulkLoadedFiles(activeFiles, archiveFiles);
backupManager.deleteBulkLoadedRows(pair.getSecond());
return mapForSrc;
} | 3.68 |
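For orientation, a small illustration of the target path layout the loop above builds (backup root / backup id / namespace / table / region / family / file); all concrete values below are made up.

import org.apache.hadoop.fs.Path;

public class BulkLoadPathDemo {
    public static void main(String[] args) {
        Path tgtRoot = new Path(new Path("hdfs://backup/root"), "backup_1700000000000");
        Path tgtTable = new Path(new Path(tgtRoot, "default"), "usertable");
        Path tgtFam = new Path(new Path(tgtTable, "e5f1a2b3c4d5"), "cf");
        String file = "/data/default/usertable/e5f1a2b3c4d5/cf/hfile-0001";
        String filename = file.substring(file.lastIndexOf('/') + 1); // same extraction as above
        System.out.println(new Path(tgtFam, filename));
        // hdfs://backup/root/backup_1700000000000/default/usertable/e5f1a2b3c4d5/cf/hfile-0001
    }
}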
hbase_YammerHistogramUtils_newHistogram | /**
* Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are not public
* in 2.2.0, so we use reflection to find them.
*/
public static Histogram newHistogram(Reservoir sample) {
try {
Constructor<?> ctor = Histogram.class.getDeclaredConstructor(Reservoir.class);
ctor.setAccessible(true);
return (Histogram) ctor.newInstance(sample);
} catch (Exception e) {
throw new RuntimeException(e);
}
} | 3.68 |
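A possible usage of the reflective factory above with a concrete reservoir from dropwizard metrics-core; the import for YammerHistogramUtils reflects where the class lives in HBase as I recall it and should be verified against the module in use.

import org.apache.hadoop.hbase.util.YammerHistogramUtils;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.UniformReservoir;

public class HistogramDemo {
    public static void main(String[] args) {
        // 1024-sample uniform reservoir; any Reservoir implementation works here.
        Histogram latencies = YammerHistogramUtils.newHistogram(new UniformReservoir(1024));
        for (long v : new long[] {3, 5, 8, 13, 21}) {
            latencies.update(v);
        }
        Snapshot snap = latencies.getSnapshot();
        System.out.printf("p50=%.1f p99=%.1f max=%d%n",
            snap.getMedian(), snap.get99thPercentile(), snap.getMax());
    }
}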
hbase_RegionState_updateRitDuration | /**
* Update the duration of region in transition
* @param previousStamp previous RegionState's timestamp
*/
@InterfaceAudience.Private
void updateRitDuration(long previousStamp) {
this.ritDuration += (this.stamp - previousStamp);
} | 3.68 |
rocketmq-connect_WorkerSinkTask_receiveMessages | /**
     * Receive messages from MQ and deliver them to the sink task.
     *
     * @param messages the messages pulled from MQ
     */
private void receiveMessages(List<MessageExt> messages) {
if (messageBatch.isEmpty()) {
originalOffsets.clear();
}
for (MessageExt message : messages) {
this.retryWithToleranceOperator.consumerRecord(message);
ConnectRecord connectRecord = convertMessages(message);
originalOffsets.put(
new MessageQueue(message.getTopic(), message.getBrokerName(), message.getQueueId()),
message.getQueueOffset() + 1
);
if (connectRecord != null && !this.retryWithToleranceOperator.failed()) {
messageBatch.add(connectRecord);
}
log.info("Received one message success : msgId {}", message.getMsgId());
}
try {
long start = System.currentTimeMillis();
sinkTask.put(new ArrayList<>(messageBatch));
//metrics
recordMultiple(messageBatch.size());
sinkTaskMetricsGroup.recordPut(System.currentTimeMillis() - start);
currentOffsets.putAll(originalOffsets);
messageBatch.clear();
if (!shouldPause()) {
if (pausedForRetry) {
resumeAll();
pausedForRetry = false;
}
}
} catch (RetriableException e) {
log.error("task {} put sink recode RetriableException", this, e.getMessage(), e);
// pause all consumer wait for put data
pausedForRetry = true;
pauseAll();
throw e;
} catch (Throwable t) {
log.error("task {} put sink recode Throwable", this, t.getMessage(), t);
throw t;
}
} | 3.68 |
morf_DropViewListener_deregisterAllViews | /**
   * @return List of SQL statements.
   * @deprecated kept to ensure backwards compatibility.
   */
@Override
@Deprecated
public Iterable<String> deregisterAllViews() {
return ImmutableList.of();
} | 3.68 |