Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68)
framework_VTooltip_onBlur
/** * Hides Tooltip when the page is navigated with the keyboard. * * Removes the Tooltip from page to make sure assistive devices don't * recognize it by accident. */ @Override public void onBlur(BlurEvent be) { handledByFocus = false; handleHideEvent(); }
3.68
hbase_Procedure_beforeReplay
/** * Called when the procedure is loaded for replay. The procedure implementor may use this method * to perform some quick operation before replay. e.g. failing the procedure if the state on * replay may be unknown. */ protected void beforeReplay(TEnvironment env) { // no-op }
3.68
dubbo_AbstractDynamicConfiguration_getGroup
/** * Get the group from {@link URL the specified connection URL} * * @param url {@link URL the specified connection URL} * @return non-null * @since 2.7.8 */ protected static String getGroup(URL url) { String group = getParameter(url, GROUP_PARAM_NAME, null); return StringUtils.isBlank(group) ? getParameter(url, GROUP_KEY, DEFAULT_GROUP) : group; }
3.68
hadoop_SubApplicationRowKey_getRowKeyAsString
/**
 * Constructs a row key for the sub app table as follows:
 * <p>
 * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId}.
 *
 * subAppUserId is usually the doAsUser.
 * userId is the yarn user that the AM runs as.
 * </p>
 *
 * @return String representation of row key.
 */
public String getRowKeyAsString() {
  return subAppRowKeyConverter.encodeAsString(this);
}
3.68
hadoop_SuccessData_setSuccess
/** * Set the success flag. * @param success did the job succeed? */ public void setSuccess(boolean success) { this.success = success; }
3.68
framework_VAbstractSplitPanel_getSplitterPosition
/** * For internal use only. May be removed or replaced in the future. * * @return the current position of the split handle in either percentages or * pixels */ public String getSplitterPosition() { return position; }
3.68
querydsl_AbstractMongodbQuery_fetchFirst
/** * Fetch first with the specific fields * * @param paths fields to return * @return first result */ public K fetchFirst(Path<?>...paths) { queryMixin.setProjection(paths); return fetchFirst(); }
3.68
flink_NetUtils_getHostnameFromFQDN
/** * Turn a fully qualified domain name (fqdn) into a hostname. If the fqdn has multiple subparts * (separated by a period '.'), it will take the first part. Otherwise it takes the entire fqdn. * * @param fqdn The fully qualified domain name. * @return The hostname. */ public static String getHostnameFromFQDN(String fqdn) { if (fqdn == null) { throw new IllegalArgumentException("fqdn is null"); } int dotPos = fqdn.indexOf('.'); if (dotPos == -1) { return fqdn; } else { return fqdn.substring(0, dotPos); } }
3.68
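A quick illustration of the two branches above, as a sketch that assumes Flink's NetUtils is on the classpath:

    // Multi-part FQDN: everything before the first dot is kept.
    NetUtils.getHostnameFromFQDN("worker-1.cluster.example.com"); // -> "worker-1"
    // No dot: the input is returned unchanged.
    NetUtils.getHostnameFromFQDN("localhost");                    // -> "localhost"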
querydsl_PathMetadataFactory_forCollectionAny
/** * Create a new PathMetadata instance for collection any access * * @param parent parent path * @return collection any path */ public static PathMetadata forCollectionAny(Path<?> parent) { return new PathMetadata(parent, "", PathType.COLLECTION_ANY); }
3.68
hudi_AvroSchemaCompatibility_getIncompatibilities
/** * If the compatibility is INCOMPATIBLE, returns {@link Incompatibility * Incompatibilities} found, otherwise an empty list. * * @return a list of {@link Incompatibility Incompatibilities}, may be empty, * never null. */ public List<Incompatibility> getIncompatibilities() { return mIncompatibilities; }
3.68
hadoop_RouterQuotaManager_getParentsContainingQuota
/** * Get parent paths (including itself) and quotas of the specified federation * path. Only parents containing quota are returned. * @param childPath Federated path. * @return TreeMap of parent paths and quotas. */ TreeMap<String, RouterQuotaUsage> getParentsContainingQuota( String childPath) { TreeMap<String, RouterQuotaUsage> res = new TreeMap<>(); readLock.lock(); try { Entry<String, RouterQuotaUsage> entry = this.cache.floorEntry(childPath); while (entry != null) { String mountPath = entry.getKey(); RouterQuotaUsage quota = entry.getValue(); if (isQuotaSet(quota) && isParentEntry(childPath, mountPath)) { res.put(mountPath, quota); } entry = this.cache.lowerEntry(mountPath); } return res; } finally { readLock.unlock(); } }
3.68
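The floorEntry/lowerEntry walk above works on any TreeMap keyed by paths. A self-contained JDK sketch of the same ancestor scan; the class name, sample keys, and the simplified isParentEntry are illustrative, not the Hadoop implementations:

    import java.util.Map;
    import java.util.TreeMap;

    public class AncestorScanDemo {
        // True when mountPath is childPath itself or one of its path ancestors.
        static boolean isParentEntry(String childPath, String mountPath) {
            return childPath.equals(mountPath)
                || childPath.startsWith(mountPath.endsWith("/") ? mountPath : mountPath + "/");
        }

        public static void main(String[] args) {
            TreeMap<String, Integer> quotas = new TreeMap<>();
            quotas.put("/", 100);
            quotas.put("/a", 10);
            quotas.put("/a/b", 5);
            quotas.put("/z", 7);

            String childPath = "/a/b/c";
            // floorEntry finds the greatest key <= childPath; lowerEntry then steps
            // strictly downwards through the keys, filtering to ancestors.
            Map.Entry<String, Integer> entry = quotas.floorEntry(childPath);
            while (entry != null) {
                if (isParentEntry(childPath, entry.getKey())) {
                    System.out.println(entry.getKey() + " -> " + entry.getValue());
                }
                entry = quotas.lowerEntry(entry.getKey());
            }
            // Prints /a/b -> 5, /a -> 10, / -> 100 (and skips /z).
        }
    }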
hbase_RegionStates_getRegionsInTransitionCount
/** * Get the number of regions in transition. */ public int getRegionsInTransitionCount() { return regionInTransition.size(); }
3.68
querydsl_BeanPath_as
/** * Cast the path to a subtype querytype * * @param <U> * @param clazz subtype class * @return subtype instance with the same identity */ @SuppressWarnings("unchecked") public <U extends BeanPath<? extends T>> U as(Class<U> clazz) { try { if (!casts.containsKey(clazz)) { PathMetadata metadata; if (pathMixin.getMetadata().getPathType() != PathType.COLLECTION_ANY) { metadata = PathMetadataFactory.forDelegate(pathMixin); } else { metadata = pathMixin.getMetadata(); } U rv; // the inits for the subtype will be wider, if it's a variable path if (inits != null && pathMixin.getMetadata().getPathType() != PathType.VARIABLE) { rv = clazz.getConstructor(PathMetadata.class, PathInits.class).newInstance(metadata, inits); } else { rv = clazz.getConstructor(PathMetadata.class).newInstance(metadata); } casts.put(clazz, rv); return rv; } else { return (U) casts.get(clazz); } } catch (InstantiationException | NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { throw new ExpressionException(e.getMessage(), e); } }
3.68
hadoop_AMRMClientAsyncImpl_getClusterNodeCount
/**
 * Get the current number of nodes in the cluster.
 * A valid value is available after a call to allocate has been made.
 *
 * @return Current number of nodes in the cluster
 */
public int getClusterNodeCount() {
  return client.getClusterNodeCount();
}
3.68
flink_WebMonitorUtils_isFlinkRuntimeWebInClassPath
/** * Returns {@code true} if the optional dependency {@code flink-runtime-web} is in the * classpath. */ private static boolean isFlinkRuntimeWebInClassPath() { try { Class.forName(WEB_FRONTEND_BOOTSTRAP_CLASS_FQN); return true; } catch (ClassNotFoundException e) { // class not found means that there is no flink-runtime-web in the classpath return false; } }
3.68
framework_ThemeResource_getMIMEType
/** * @see com.vaadin.server.Resource#getMIMEType() */ @Override public String getMIMEType() { return FileTypeResolver.getMIMEType(getResourceId()); }
3.68
flink_FlinkJoinToMultiJoinRule_combinePostJoinFilters
/** * Combines the post-join filters from the left and right inputs (if they are MultiJoinRels) * into a single AND'd filter. * * @param joinRel the original LogicalJoin * @param left left child of the LogicalJoin * @param right right child of the LogicalJoin * @return combined post-join filters AND'd together */ private List<RexNode> combinePostJoinFilters(Join joinRel, RelNode left, RelNode right) { final List<RexNode> filters = new ArrayList<>(); if (right instanceof MultiJoin) { final MultiJoin multiRight = (MultiJoin) right; filters.add( shiftRightFilter(joinRel, left, multiRight, multiRight.getPostJoinFilter())); } if (left instanceof MultiJoin) { filters.add(((MultiJoin) left).getPostJoinFilter()); } return filters; }
3.68
flink_TopNBuffer_put
/** * Appends a record into the buffer. * * @param sortKey sort key with which the specified value is to be associated * @param value record which is to be appended * @return the size of the collection under the sortKey. */ public int put(RowData sortKey, RowData value) { currentTopNum += 1; // update treeMap Collection<RowData> collection = treeMap.get(sortKey); if (collection == null) { collection = valueSupplier.get(); treeMap.put(sortKey, collection); } collection.add(value); return collection.size(); }
3.68
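The get-or-create step in put() can equivalently be written with computeIfAbsent; a JDK-only sketch (class and field names are made up for illustration):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.TreeMap;
    import java.util.function.Supplier;

    public class TopNBufferSketch<K extends Comparable<K>, V> {
        private final TreeMap<K, Collection<V>> treeMap = new TreeMap<>();
        private final Supplier<Collection<V>> valueSupplier = ArrayList::new;
        private int currentTopNum;

        // Appends a value under sortKey and returns the bucket size, as in put() above.
        public int put(K sortKey, V value) {
            currentTopNum += 1;
            Collection<V> bucket = treeMap.computeIfAbsent(sortKey, k -> valueSupplier.get());
            bucket.add(value);
            return bucket.size();
        }
    }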
framework_VComboBox_updatePlaceholder
/** * Update placeholder visibility (hidden when read-only or disabled). */ public void updatePlaceholder() { if (inputPrompt != null && enabled && !readonly) { tb.getElement().setAttribute("placeholder", inputPrompt); } else { tb.getElement().removeAttribute("placeholder"); } }
3.68
flink_CatalogColumnStatistics_copy
/** * Create a deep copy of "this" instance. * * @return a deep copy */ public CatalogColumnStatistics copy() { Map<String, CatalogColumnStatisticsDataBase> copy = CollectionUtil.newHashMapWithExpectedSize(columnStatisticsData.size()); for (Map.Entry<String, CatalogColumnStatisticsDataBase> entry : columnStatisticsData.entrySet()) { copy.put(entry.getKey(), entry.getValue().copy()); } return new CatalogColumnStatistics(copy, new HashMap<>(this.properties)); }
3.68
framework_UIDL_getIntArrayAttribute
/** * Gets the named attribute as an int array. * * @param name * the name of the attribute to get * @return the attribute value */ public int[] getIntArrayAttribute(final String name) { return attr().getIntArray(name); }
3.68
hadoop_RollingFileSystemSink_checkAppend
/** * Test whether the file system supports append and return the answer. * * @param fs the target file system */ private boolean checkAppend(FileSystem fs) { boolean canAppend = true; try { fs.append(basePath); } catch (UnsupportedOperationException ex) { canAppend = false; } catch (IOException ex) { // Ignore. The operation is supported. } return canAppend; }
3.68
framework_PropertyFilterDefinition_getDefaultFilter
/** * Get the default nested property filtering conditions. * * @return default property filter */ public static PropertyFilterDefinition getDefaultFilter() { return new PropertyFilterDefinition( BeanPropertySet.NestedBeanPropertyDefinition.MAX_PROPERTY_NESTING_DEPTH, Arrays.asList("java")); }
3.68
hbase_HttpServer_userHasAdministratorAccess
/** * Get the admin ACLs from the given ServletContext and check if the given user is in the ACL. * @param servletContext the context containing the admin ACL. * @param remoteUser the remote user to check for. * @return true if the user is present in the ACL, false if no ACL is set or the user is not * present */ public static boolean userHasAdministratorAccess(ServletContext servletContext, String remoteUser) { AccessControlList adminsAcl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return userHasAdministratorAccess(adminsAcl, remoteUser); }
3.68
flink_TaskDeploymentDescriptor_getJobId
/** * Returns the task's job ID. * * @return the job ID this task belongs to */ public JobID getJobId() { return jobId; }
3.68
flink_JobGraphJobInformation_copyJobGraph
/** Returns a copy of a jobGraph that can be mutated. */ public JobGraph copyJobGraph() { return InstantiationUtil.cloneUnchecked(jobGraph); }
3.68
framework_GeneratedPropertyContainer_removeGeneratedProperty
/** * Removes any possible PropertyValueGenerator with given property id. Fires * a PropertySetChangeEvent. * * @param propertyId * property id */ public void removeGeneratedProperty(Object propertyId) { if (propertyGenerators.containsKey(propertyId)) { propertyGenerators.remove(propertyId); fireContainerPropertySetChange(); } }
3.68
querydsl_MetaDataExporter_setExportInverseForeignKeys
/** * Set whether inverse foreign keys should be exported * * @param exportInverseForeignKeys */ public void setExportInverseForeignKeys(boolean exportInverseForeignKeys) { this.exportInverseForeignKeys = exportInverseForeignKeys; }
3.68
pulsar_EtcdSessionWatcher_checkConnectionStatus
// task that runs every TICK_TIME to check Etcd connection
private synchronized void checkConnectionStatus() {
  try {
    CompletableFuture<SessionEvent> future = new CompletableFuture<>();
    client.getKVClient().get(ByteSequence.from("/".getBytes(StandardCharsets.UTF_8)))
        .thenRun(() -> {
          future.complete(SessionEvent.Reconnected);
        }).exceptionally(ex -> {
          future.complete(SessionEvent.ConnectionLost);
          return null;
        });
    SessionEvent etcdClientState;
    try {
      etcdClientState = future.get(tickTimeMillis, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      // Consider etcd disconnected if an etcd operation takes more than TICK_TIME
      etcdClientState = SessionEvent.ConnectionLost;
    }
    checkState(etcdClientState);
  } catch (RejectedExecutionException | InterruptedException e) {
    task.cancel(true);
  } catch (Throwable t) {
    log.warn("Error while checking Etcd connection status", t);
  }
}
3.68
hadoop_ListResultSchema_withPaths
/** * Set the paths value. * * @param paths the paths value to set * @return the ListSchema object itself. */ public ListResultSchema withPaths(final List<ListResultEntrySchema> paths) { this.paths = paths; return this; }
3.68
hbase_HtmlQuoting_needsQuoting
/** * Does the given string need to be quoted? * @param str the string to check * @return does the string contain any of the active html characters? */ public static boolean needsQuoting(String str) { if (str == null) { return false; } byte[] bytes = Bytes.toBytes(str); return needsQuoting(bytes, 0, bytes.length); }
3.68
hadoop_DiskBalancerDataNode_computeNodeDensity
/** * Computes nodes data density. * * This metric allows us to compare different nodes and how well the data is * spread across a set of volumes inside the node. */ public void computeNodeDensity() { double sum = 0; int volcount = 0; for (DiskBalancerVolumeSet vset : volumeSets.values()) { for (DiskBalancerVolume vol : vset.getVolumes()) { sum += Math.abs(vol.getVolumeDataDensity()); volcount++; } } nodeDataDensity = sum; this.volumeCount = volcount; }
3.68
flink_TableFactoryService_filter
/** Filters found factories by factory class and with matching context. */ private static <T extends TableFactory> List<T> filter( List<TableFactory> foundFactories, Class<T> factoryClass, Map<String, String> properties) { Preconditions.checkNotNull(factoryClass); Preconditions.checkNotNull(properties); List<T> classFactories = filterByFactoryClass(factoryClass, properties, foundFactories); List<T> contextFactories = filterByContext(factoryClass, properties, classFactories); return filterBySupportedProperties( factoryClass, properties, classFactories, contextFactories); }
3.68
flink_TypeTransformations_legacyToNonLegacy
/** Returns a type transformation that transforms LEGACY(...) type to a non-legacy type. */ public static TypeTransformation legacyToNonLegacy() { return LegacyToNonLegacyTransformation.INSTANCE; }
3.68
flink_AbstractMapSerializer_getKeySerializer
/** * Returns the serializer for the keys in the map. * * @return The serializer for the keys in the map. */ public TypeSerializer<K> getKeySerializer() { return keySerializer; }
3.68
framework_DropTargetExtensionConnector_onDragEnter
/**
 * Event handler for the {@code dragenter} event.
 * <p>
 * Override this method in case custom handling for the dragenter event is
 * required. If the drop is allowed, the event should prevent default.
 *
 * @param event
 *            browser event to be handled
 */
protected void onDragEnter(Event event) {
    NativeEvent nativeEvent = (NativeEvent) event;
    // Generate style name for drop target
    styleDragCenter = dropTargetWidget.getStylePrimaryName()
            + STYLE_SUFFIX_DRAG_CENTER;
    if (isDropAllowed(nativeEvent)) {
        addDragOverStyle(nativeEvent);
        setDropEffect(nativeEvent);
        // According to spec, need to call this for allowing dropping, the
        // default action would be to reject as target
        event.preventDefault();
    } else {
        // Remove drop effect
        nativeEvent.getDataTransfer()
                .setDropEffect(DataTransfer.DropEffect.NONE);
    }
}
3.68
hbase_BackupInfo_getIncrTimestampMap
/** * Get new region server log timestamps after distributed log roll * @return new region server log timestamps */ public Map<TableName, Map<String, Long>> getIncrTimestampMap() { return this.incrTimestampMap; }
3.68
hbase_ScannerContext_limitReached
/** Returns true when the state indicates that a limit has been reached and scan should stop */ public boolean limitReached() { return this.limitReached; }
3.68
hbase_CellUtil_copyQualifierTo
/** * Copies the qualifier to the given bytebuffer * @param cell the cell whose qualifier has to be copied * @param destination the destination bytebuffer to which the qualifier has to be copied * @param destinationOffset the offset in the destination bytebuffer * @return the offset of the bytebuffer after the copy has happened */ public static int copyQualifierTo(Cell cell, ByteBuffer destination, int destinationOffset) { int qlen = cell.getQualifierLength(); if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyFromBufferToBuffer( ((ByteBufferExtendedCell) cell).getQualifierByteBuffer(), destination, ((ByteBufferExtendedCell) cell).getQualifierPosition(), destinationOffset, qlen); } else { ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getQualifierArray(), cell.getQualifierOffset(), qlen); } return destinationOffset + qlen; }
3.68
hbase_CellCreator_create
/**
 * @param row row key
 * @param roffset row offset
 * @param rlength row length
 * @param family family name
 * @param foffset family offset
 * @param flength family length
 * @param qualifier column qualifier
 * @param qoffset qualifier offset
 * @param qlength qualifier length
 * @param timestamp version timestamp
 * @param value column value
 * @param voffset value offset
 * @param vlength value length
 * @param tags tags to be attached to the cell
 * @return created Cell
 */
public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
  byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
  int vlength, List<Tag> tags) throws IOException {
  return new KeyValue(row, roffset, rlength, family, foffset, flength, qualifier, qoffset,
    qlength, timestamp, KeyValue.Type.Put, value, voffset, vlength, tags);
}
3.68
hmily_StringUtils_isEmpty
/** * Is empty boolean. * * @param array the array * @return the boolean */ public static boolean isEmpty(final Object[] array) { return array == null || array.length == 0; }
3.68
flink_SourceProvider_of
/** Helper method for creating a Source provider with a provided source parallelism. */ static SourceProvider of(Source<RowData, ?, ?> source, @Nullable Integer sourceParallelism) { return new SourceProvider() { @Override public Source<RowData, ?, ?> createSource() { return source; } @Override public boolean isBounded() { return Boundedness.BOUNDED.equals(source.getBoundedness()); } @Override public Optional<Integer> getParallelism() { return Optional.ofNullable(sourceParallelism); } }; }
3.68
hudi_CollectionUtils_zipToMap
/**
 * Zip two lists into a Map. Throws an exception if the two lists differ in size.
 */
public static <K, V> Map<K, V> zipToMap(List<K> keys, List<V> values) {
  checkArgument(keys.size() == values.size(),
      "keys' size must be equal to the values' size");
  return IntStream.range(0, keys.size()).boxed().collect(Collectors.toMap(keys::get, values::get));
}
3.68
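A JDK-only usage sketch of the zip pattern above; the helper is a hypothetical standalone copy, not the Hudi class (note that Collectors.toMap also rejects duplicate keys):

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;
    import java.util.stream.IntStream;

    public class ZipToMapDemo {
        static <K, V> Map<K, V> zipToMap(List<K> keys, List<V> values) {
            if (keys.size() != values.size()) {
                throw new IllegalArgumentException("keys' size must be equal to the values' size");
            }
            return IntStream.range(0, keys.size()).boxed()
                .collect(Collectors.toMap(keys::get, values::get));
        }

        public static void main(String[] args) {
            // Prints {a=1, b=2, c=3}
            System.out.println(zipToMap(List.of("a", "b", "c"), List.of(1, 2, 3)));
        }
    }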
hbase_RequestConverter_buildIsSnapshotCleanupEnabledRequest
/** * Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL * expiration is turned on */ public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() { return IsSnapshotCleanupEnabledRequest.newBuilder().build(); }
3.68
hadoop_BlobOperationDescriptor_getOperationType
/**
 * Gets the operation type of an Azure Storage operation.
 *
 * @param conn the connection object for the Azure Storage operation.
 * @return the operation type.
 */
static OperationType getOperationType(HttpURLConnection conn) {
  OperationType operationType = OperationType.Unknown;
  String method = conn.getRequestMethod();
  String compValue = getQueryParameter(conn.getURL(), "comp");

  if (method.equalsIgnoreCase("PUT")) {
    if (compValue != null) {
      switch (compValue) {
        case "metadata":
          operationType = OperationType.SetMetadata;
          break;
        case "properties":
          operationType = OperationType.SetProperties;
          break;
        case "block":
          operationType = OperationType.PutBlock;
          break;
        case "page":
          String pageWrite = conn.getRequestProperty("x-ms-page-write");
          if (pageWrite != null && pageWrite.equalsIgnoreCase("UPDATE")) {
            operationType = OperationType.PutPage;
          }
          break;
        case "appendblock":
          operationType = OperationType.AppendBlock;
          break;
        case "blocklist":
          operationType = OperationType.PutBlockList;
          break;
        default:
          break;
      }
    } else {
      String blobType = conn.getRequestProperty("x-ms-blob-type");
      if (blobType != null
          && (blobType.equalsIgnoreCase("PageBlob")
              || blobType.equalsIgnoreCase("BlockBlob")
              || blobType.equalsIgnoreCase("AppendBlob"))) {
        operationType = OperationType.CreateBlob;
      } else if (blobType == null) {
        String resType = getQueryParameter(conn.getURL(), "restype");
        if (resType != null && resType.equalsIgnoreCase("container")) {
          operationType = OperationType.CreateContainer;
        }
      }
    }
  } else if (method.equalsIgnoreCase("GET")) {
    if (compValue != null) {
      switch (compValue) {
        case "list":
          operationType = OperationType.ListBlobs;
          break;
        case "metadata":
          operationType = OperationType.GetMetadata;
          break;
        case "blocklist":
          operationType = OperationType.GetBlockList;
          break;
        case "pagelist":
          operationType = OperationType.GetPageList;
          break;
        default:
          break;
      }
    } else if (conn.getRequestProperty("x-ms-range") != null) {
      operationType = OperationType.GetBlob;
    }
  } else if (method.equalsIgnoreCase("HEAD")) {
    operationType = OperationType.GetProperties;
  } else if (method.equalsIgnoreCase("DELETE")) {
    String resType = getQueryParameter(conn.getURL(), "restype");
    if (resType != null && resType.equalsIgnoreCase("container")) {
      operationType = OperationType.DeleteContainer;
    } else {
      operationType = OperationType.DeleteBlob;
    }
  }
  return operationType;
}
3.68
hadoop_OBSCommonUtils_createFileStatus
/**
 * Create a file status instance from a listing.
 *
 * @param keyPath path to entry
 * @param summary summary from OBS
 * @param blockSize block size to declare.
 * @param owner owner of the file
 * @return a status entry
 */
static OBSFileStatus createFileStatus(
    final Path keyPath, final ObsObject summary, final long blockSize, final String owner) {
  if (objectRepresentsDirectory(
      summary.getObjectKey(), summary.getMetadata().getContentLength())) {
    return new OBSFileStatus(keyPath, owner);
  } else {
    return new OBSFileStatus(
        summary.getMetadata().getContentLength(),
        dateToLong(summary.getMetadata().getLastModified()),
        keyPath,
        blockSize,
        owner);
  }
}
3.68
framework_FileDropEvent_getFiles
/** * Gets the collection of files dropped onto the file drop target component. * * @return Collection of files that were dropped onto the file drop target * component. */ public Collection<Html5File> getFiles() { return files; }
3.68
framework_VaadinService_handleRequest
/** * Handles the incoming request and writes the response into the response * object. Uses {@link #getRequestHandlers()} for handling the request. * <p> * If a session expiration is detected during request handling then each * {@link RequestHandler request handler} has an opportunity to handle the * expiration event if it implements {@link SessionExpiredHandler}. If no * request handler handles session expiration a default expiration message * will be written. * </p> * * @param request * The incoming request * @param response * The outgoing response * @throws ServiceException * Any exception that occurs during response handling will be * wrapped in a ServiceException */ public void handleRequest(VaadinRequest request, VaadinResponse response) throws ServiceException { requestStart(request, response); VaadinSession vaadinSession = null; try { // Find out the service session this request is related to vaadinSession = findVaadinSession(request); if (vaadinSession == null) { return; } for (RequestHandler handler : getRequestHandlers()) { if (handler.handleRequest(vaadinSession, request, response)) { return; } } // Request not handled by any RequestHandler response.sendError(HttpServletResponse.SC_NOT_FOUND, "Request was not handled by any registered handler."); } catch (final SessionExpiredException e) { handleSessionExpired(request, response); } catch (final Throwable e) { handleExceptionDuringRequest(request, response, vaadinSession, e); } finally { requestEnd(request, response, vaadinSession); } }
3.68
hadoop_ContainerLogContext_getContainerType
/**
 * Get the {@link ContainerType} of the container.
 *
 * @return the type of the container
 */
public ContainerType getContainerType() {
  return containerType;
}
3.68
flink_JMXReporter_replaceInvalidChars
/**
 * Lightweight method to replace unsupported characters. If the string does not contain any
 * unsupported characters, this method creates no new string (and in fact no new objects at
 * all).
 *
 * <p>Replacements:
 *
 * <ul>
 *   <li>{@code > < "} are removed
 *   <li>{@code space} is replaced by {@code _} (underscore)
 *   <li>{@code , = ; : ? ' *} are replaced by {@code -} (hyphen)
 * </ul>
 */
static String replaceInvalidChars(String str) {
    char[] chars = null;
    final int strLen = str.length();
    int pos = 0;

    for (int i = 0; i < strLen; i++) {
        final char c = str.charAt(i);
        switch (c) {
            case '>':
            case '<':
            case '"':
                // remove character by not moving cursor
                if (chars == null) {
                    chars = str.toCharArray();
                }
                break;
            case ' ':
                if (chars == null) {
                    chars = str.toCharArray();
                }
                chars[pos++] = '_';
                break;
            case ',':
            case '=':
            case ';':
            case ':':
            case '?':
            case '\'':
            case '*':
                if (chars == null) {
                    chars = str.toCharArray();
                }
                chars[pos++] = '-';
                break;
            default:
                if (chars != null) {
                    chars[pos] = c;
                }
                pos++;
        }
    }
    return chars == null ? str : new String(chars, 0, pos);
}
3.68
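Expected behavior of the sanitizer above, as an illustrative fragment (the method is package-private in Flink, so this assumes same-package test code):

    String cleaned = JMXReporter.replaceInvalidChars("my \"metric\", v=1");
    // '"' is removed, ' ' -> '_', ',' and '=' -> '-':
    System.out.println(cleaned); // my_metric-_v-1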
hudi_HoodieBloomIndex_loadColumnRangesFromFiles
/** * Load all involved files as <Partition, filename> pair List. */ List<Pair<String, BloomIndexFileInfo>> loadColumnRangesFromFiles( List<String> partitions, final HoodieEngineContext context, final HoodieTable hoodieTable) { // Obtain the latest data files from all the partitions. List<Pair<String, Pair<String, HoodieBaseFile>>> partitionPathFileIDList = getLatestBaseFilesForAllPartitions(partitions, context, hoodieTable).stream() .map(pair -> Pair.of(pair.getKey(), Pair.of(pair.getValue().getFileId(), pair.getValue()))) .collect(toList()); context.setJobStatus(this.getClass().getName(), "Obtain key ranges for file slices (range pruning=on): " + config.getTableName()); return context.map(partitionPathFileIDList, pf -> { try { HoodieRangeInfoHandle rangeInfoHandle = new HoodieRangeInfoHandle(config, hoodieTable, Pair.of(pf.getKey(), pf.getValue().getKey())); String[] minMaxKeys = rangeInfoHandle.getMinMaxKeys(pf.getValue().getValue()); return Pair.of(pf.getKey(), new BloomIndexFileInfo(pf.getValue().getKey(), minMaxKeys[0], minMaxKeys[1])); } catch (MetadataNotFoundException me) { LOG.warn("Unable to find range metadata in file :" + pf); return Pair.of(pf.getKey(), new BloomIndexFileInfo(pf.getValue().getKey())); } }, Math.max(partitionPathFileIDList.size(), 1)); }
3.68
framework_AbstractStringToNumberConverter_getFormat
/** * Returns the format used by {@link #convertToPresentation(Object, Locale)} * and {@link #convertToModel(Object, Locale)}. * * @param locale * The locale to use * @return A NumberFormat instance * @since 7.1 */ protected NumberFormat getFormat(Locale locale) { if (locale == null) { locale = Locale.getDefault(); } return NumberFormat.getNumberInstance(locale); }
3.68
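The locale-sensitive NumberFormat that getFormat falls back to behaves as in this JDK-only sketch:

    import java.text.NumberFormat;
    import java.util.Locale;

    public class NumberFormatDemo {
        public static void main(String[] args) {
            NumberFormat us = NumberFormat.getNumberInstance(Locale.US);
            NumberFormat de = NumberFormat.getNumberInstance(Locale.GERMANY);
            System.out.println(us.format(1234.56)); // 1,234.56
            System.out.println(de.format(1234.56)); // 1.234,56
        }
    }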
dubbo_DubboConfigDefaultPropertyValueBeanPostProcessor_getOrder
/** * @return Higher than {@link InitDestroyAnnotationBeanPostProcessor#getOrder()} * @see InitDestroyAnnotationBeanPostProcessor * @see CommonAnnotationBeanPostProcessor * @see PostConstruct */ @Override public int getOrder() { return Ordered.LOWEST_PRECEDENCE + 1; }
3.68
hbase_SegmentScanner_realSeekDone
/**
 * This scanner works solely on the in-memory MemStore and does not work on store files.
 * MutableCellSetSegmentScanner always does the seek, therefore this always returns true.
 */
@Override
public boolean realSeekDone() {
  return true;
}
3.68
hbase_HFileContentValidator_validateHFileContent
/**
 * Check HFile contents are readable by HBase 2.
 * @param conf used configuration
 * @return true if all HFile contents are readable, false if any corrupted HFiles were found
 * @throws IOException if a remote or network exception occurs
 */
private boolean validateHFileContent(Configuration conf) throws IOException {
  FileSystem fileSystem = CommonFSUtils.getCurrentFileSystem(conf);
  ExecutorService threadPool = createThreadPool(conf);
  HFileCorruptionChecker checker;
  try {
    checker = new HFileCorruptionChecker(conf, threadPool, false);
    Path rootDir = CommonFSUtils.getRootDir(conf);
    LOG.info("Validating HFile contents under {}", rootDir);
    Collection<Path> tableDirs = FSUtils.getTableDirs(fileSystem, rootDir);
    checker.checkTables(tableDirs);
    Path archiveRootDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    LOG.info("Validating HFile contents under {}", archiveRootDir);
    List<Path> archiveTableDirs = FSUtils.getTableDirs(fileSystem, archiveRootDir);
    checker.checkTables(archiveTableDirs);
  } finally {
    threadPool.shutdown();
    try {
      threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  int checkedFiles = checker.getHFilesChecked();
  Collection<Path> corrupted = checker.getCorrupted();
  if (corrupted.isEmpty()) {
    LOG.info("Checked {} HFiles, none of them are corrupted.", checkedFiles);
    LOG.info("There are no incompatible HFiles.");
    return true;
  } else {
    LOG.info("Checked {} HFiles, {} are corrupted.", checkedFiles, corrupted.size());
    for (Path path : corrupted) {
      LOG.info("Corrupted file: {}", path);
    }
    LOG.info("Change data block encodings before upgrading. "
        + "Check https://s.apache.org/prefixtree for instructions.");
    return false;
  }
}
3.68
flink_KeyedStateCheckpointOutputStream_isKeyGroupAlreadyFinished
/** * Returns true if the key group is already completely written and immutable. It was started and * since then another key group has been started. */ public boolean isKeyGroupAlreadyFinished(int keyGroupId) { return isKeyGroupAlreadyStarted(keyGroupId) && keyGroupId != getCurrentKeyGroup(); }
3.68
dubbo_AbstractAnnotationBeanPostProcessor_prepareInjection
/**
 * Prepare injection data after injection elements have been found.
 *
 * @param metadata the annotated injection metadata
 * @throws Exception if preparation fails
 */
protected void prepareInjection(AnnotatedInjectionMetadata metadata) throws Exception {}
3.68
hudi_HoodieFlinkCopyOnWriteTable_insertPrepped
/**
 * Inserts the given prepared records into the Hoodie table, at the supplied instantTime.
 *
 * <p>This implementation requires that the input records are already tagged, and de-duped if needed.
 *
 * <p>Specifies the write handle explicitly in order to have fine-grained control over
 * the underlying file.
 *
 * @param context HoodieEngineContext
 * @param writeHandle the explicitly specified write handle
 * @param instantTime Instant Time for the action
 * @param preppedRecords Hoodie records to insert
 * @return HoodieWriteMetadata
 */
public HoodieWriteMetadata<List<WriteStatus>> insertPrepped(
    HoodieEngineContext context,
    HoodieWriteHandle<?, ?, ?, ?> writeHandle,
    String instantTime,
    List<HoodieRecord<T>> preppedRecords) {
  return new FlinkInsertPreppedCommitActionExecutor<>(context, writeHandle, config, this,
      instantTime, preppedRecords).execute();
}
3.68
hadoop_HttpReferrerAuditHeader_withSpanId
/**
 * Set the span ID.
 * @param value new value
 * @return the builder
 */
public Builder withSpanId(final String value) {
  spanId = value;
  return this;
}
3.68
hadoop_WriteOperationHelper_createPutObjectRequest
/** * Create a {@link PutObjectRequest} request against the specific key. * @param destKey destination key * @param length size, if known. Use -1 for not known * @param options options for the request * @param isFile is data to be uploaded a file * @return the request */ @Retries.OnceRaw public PutObjectRequest createPutObjectRequest(String destKey, long length, final PutObjectOptions options, boolean isFile) { activateAuditSpan(); return getRequestFactory() .newPutObjectRequestBuilder(destKey, options, length, false) .build(); }
3.68
flink_StaticFileServerHandler_setDateHeader
/** * Sets the "date" header for the HTTP response. * * @param response HTTP response */ public static void setDateHeader(FullHttpResponse response) { SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US); dateFormatter.setTimeZone(GMT_TIMEZONE); Calendar time = new GregorianCalendar(); response.headers().set(DATE, dateFormatter.format(time.getTime())); }
3.68
framework_VAbstractCalendarPanel_onMouseUp
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.MouseUpHandler#onMouseUp(com.google.gwt * .event.dom.client.MouseUpEvent) */ @Override public void onMouseUp(MouseUpEvent event) { if (mouseTimer != null) { mouseTimer.cancel(); } }
3.68
flink_EnvironmentInformation_getJvmStartupOptions
/** * Gets the system parameters and environment parameters that were passed to the JVM on startup. * * @return The options passed to the JVM on startup. */ public static String getJvmStartupOptions() { try { final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); final StringBuilder bld = new StringBuilder(); for (String s : bean.getInputArguments()) { bld.append(s).append(' '); } return bld.toString(); } catch (Throwable t) { return UNKNOWN; } }
3.68
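The same RuntimeMXBean is available in any JVM; a minimal standalone sketch:

    import java.lang.management.ManagementFactory;
    import java.lang.management.RuntimeMXBean;

    public class JvmOptionsDemo {
        public static void main(String[] args) {
            RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
            // Prints each option passed to the JVM, e.g. -Xmx512m, one per line.
            bean.getInputArguments().forEach(System.out::println);
        }
    }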
framework_MenuBar_scrollItemIntoView
/* * Scroll the specified item into view. */ private void scrollItemIntoView(MenuItem item) { if (item != null) { item.getElement().scrollIntoView(); } }
3.68
framework_AbstractInMemoryContainer_passesFilters
/** * Checks if the given itemId passes the filters set for the container. The * caller should make sure the itemId exists in the container. For * non-existing itemIds the behavior is undefined. * * @param itemId * An itemId that exists in the container. * @return true if the itemId passes all filters or no filters are set, * false otherwise. */ protected boolean passesFilters(Object itemId) { ITEMCLASS item = getUnfilteredItem(itemId); if (getFilters().isEmpty()) { return true; } for (final Filter f : getFilters()) { if (!f.passesFilter(itemId, item)) { return false; } } return true; }
3.68
framework_MultiSelectionEvent_getSource
/** * The multiselect on which the Event initially occurred. * * @return the multiselect on which the Event initially occurred. */ @Override public MultiSelect<T> getSource() { return (MultiSelect<T>) super.getSource(); }
3.68
graphhopper_HeadingEdgeFilter_getHeadingOfGeometryNearPoint
/** * Calculates the heading (in degrees) of the given edge in fwd direction near the given point. If the point is * too far away from the edge (according to the maxDistance parameter) it returns Double.NaN. */ static double getHeadingOfGeometryNearPoint(EdgeIteratorState edgeState, GHPoint point, double maxDistance) { final DistanceCalc calcDist = DistanceCalcEarth.DIST_EARTH; double closestDistance = Double.POSITIVE_INFINITY; PointList points = edgeState.fetchWayGeometry(FetchMode.ALL); int closestPoint = -1; for (int i = 1; i < points.size(); i++) { double fromLat = points.getLat(i - 1), fromLon = points.getLon(i - 1); double toLat = points.getLat(i), toLon = points.getLon(i); // the 'distance' between the point and an edge segment is either the vertical distance to the segment or // the distance to the closer one of the two endpoints. here we save one call to calcDist per segment, // because each endpoint appears in two segments (except the first and last). double distance = calcDist.validEdgeDistance(point.lat, point.lon, fromLat, fromLon, toLat, toLon) ? calcDist.calcDenormalizedDist(calcDist.calcNormalizedEdgeDistance(point.lat, point.lon, fromLat, fromLon, toLat, toLon)) : calcDist.calcDist(fromLat, fromLon, point.lat, point.lon); if (i == points.size() - 1) distance = Math.min(distance, calcDist.calcDist(toLat, toLon, point.lat, point.lon)); if (distance > maxDistance) continue; if (distance < closestDistance) { closestDistance = distance; closestPoint = i; } } if (closestPoint < 0) return Double.NaN; double fromLat = points.getLat(closestPoint - 1), fromLon = points.getLon(closestPoint - 1); double toLat = points.getLat(closestPoint), toLon = points.getLon(closestPoint); return AngleCalc.ANGLE_CALC.calcAzimuth(fromLat, fromLon, toLat, toLon); }
3.68
querydsl_QueryBase_limit
/** * Defines the limit / max results for the query results * * @param limit max rows * @return the current object */ public Q limit(@Range(from = 0, to = Integer.MAX_VALUE) long limit) { return queryMixin.limit(limit); }
3.68
pulsar_MetadataStoreFactoryImpl_removeIdentifierFromMetadataURL
/**
 * Removes the identifier from the full metadata url.
 *
 * zk:my-zk:3000 -> my-zk:3000
 * etcd:my-etcd:3000 -> my-etcd:3000
 * my-default-zk:3000 -> my-default-zk:3000
 *
 * @param metadataURL the full metadata store URL
 * @return the URL without the provider identifier prefix
 */
public static String removeIdentifierFromMetadataURL(String metadataURL) {
  MetadataStoreProvider provider = findProvider(metadataURL);
  if (metadataURL.startsWith(provider.urlScheme() + ":")) {
    return metadataURL.substring(provider.urlScheme().length() + 1);
  }
  return metadataURL;
}
3.68
hadoop_Validate_checkPathExistsAsFile
/** * Validates that the given path exists and is a file. * @param path the path to check. * @param argName the name of the argument being validated. */ public static void checkPathExistsAsFile(Path path, String argName) { checkPathExists(path, argName); checkArgument(Files.isRegularFile(path), "Path %s (%s) must point to a file.", argName, path); }
3.68
hbase_MasterCoprocessorHost_preSplitAfterMETAAction
/** * This will be called after update META step as part of split table region procedure. * @param user the user */ public void preSplitAfterMETAAction(final User user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override public void call(MasterObserver observer) throws IOException { observer.preSplitRegionAfterMETAAction(this); } }); }
3.68
framework_ListSet_add
/** * Works as java.util.ArrayList#add(int, java.lang.Object) but returns * immediately if the element is already in the ListSet. */ @Override public void add(int index, E element) { if (contains(element)) { // Duplicates are not allowed return; } super.add(index, element); itemSet.add(element); }
3.68
flink_BlobServer_getStorageLocation
/** * Returns a file handle to the file associated with the given blob key on the blob server. * * <p><strong>This is only called from {@link BlobServerConnection} or unit tests.</strong> * * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) * @param key identifying the file * @return file handle to the file * @throws IOException if creating the directory fails */ @VisibleForTesting public File getStorageLocation(@Nullable JobID jobId, BlobKey key) throws IOException { return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key); }
3.68
hadoop_FederationStateStoreFacade_removeStoredToken
/**
 * The Router supports removing a stored {@link RMDelegationTokenIdentifier}.
 *
 * @param identifier delegation token from the RM
 * @throws YarnException if the call to the state store is unsuccessful
 * @throws IOException An IO Error occurred
 */
public void removeStoredToken(RMDelegationTokenIdentifier identifier)
    throws YarnException, IOException {
  LOG.info("removing RMDelegation token with sequence number: {}.",
      identifier.getSequenceNumber());
  RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, 0L);
  RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
  stateStore.removeStoredToken(request);
}
3.68
querydsl_ExpressionUtils_distinctList
/** * Create a distinct list of the concatenated array contents * * @param args elements * @return list with distinct elements */ public static List<Expression<?>> distinctList(Expression<?>[]... args) { final Set<Expression<?>> set = new LinkedHashSet<>(); for (Expression<?>[] arr : args) { Collections.addAll(set, arr); } return CollectionUtils.unmodifiableList(new ArrayList<>(set)); }
3.68
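The LinkedHashSet trick above deduplicates while preserving first-seen order; a JDK-only equivalent over plain arrays (hypothetical demo class, using the JDK's unmodifiableList instead of the Querydsl CollectionUtils):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class DistinctListDemo {
        @SafeVarargs
        static <T> List<T> distinctList(T[]... args) {
            Set<T> set = new LinkedHashSet<>();
            for (T[] arr : args) {
                Collections.addAll(set, arr);
            }
            return Collections.unmodifiableList(new ArrayList<>(set));
        }

        public static void main(String[] args) {
            // Prints [a, b, c]: duplicates collapse, first occurrence wins.
            System.out.println(distinctList(new String[]{"a", "b"}, new String[]{"b", "c", "a"}));
        }
    }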
framework_AbstractSelect_getItemCaptionMode
/**
 * Gets the item caption mode.
 *
 * <p>
 * The mode can be one of the following ones:
 * <ul>
 * <li><code>ITEM_CAPTION_MODE_EXPLICIT_DEFAULTS_ID</code> : Items
 * Id-objects <code>toString</code> is used as item caption. If caption is
 * explicitly specified, it overrides the id-caption.
 * <li><code>ITEM_CAPTION_MODE_ID</code> : Items Id-objects
 * <code>toString</code> is used as item caption.</li>
 * <li><code>ITEM_CAPTION_MODE_ITEM</code> : Item-objects
 * <code>toString</code> is used as item caption.</li>
 * <li><code>ITEM_CAPTION_MODE_INDEX</code> : The index of the item is used
 * as item caption. The index mode can only be used with the containers
 * implementing <code>Container.Indexed</code> interface.</li>
 * <li><code>ITEM_CAPTION_MODE_EXPLICIT</code> : The item captions must be
 * explicitly specified.</li>
 * <li><code>ITEM_CAPTION_MODE_PROPERTY</code> : The item captions are read
 * from property, that must be specified with
 * <code>setItemCaptionPropertyId</code>.</li>
 * </ul>
 * The <code>ITEM_CAPTION_MODE_EXPLICIT_DEFAULTS_ID</code> is the default
 * mode.
 * </p>
 *
 * @return one of the modes listed above.
 */
public ItemCaptionMode getItemCaptionMode() {
    return itemCaptionMode;
}
3.68
dubbo_Bytes_bytes2hex
/** * to hex string. * * @param bs byte array. * @param off offset. * @param len length. * @return hex string. */ public static String bytes2hex(byte[] bs, int off, int len) { if (off < 0) { throw new IndexOutOfBoundsException("bytes2hex: offset < 0, offset is " + off); } if (len < 0) { throw new IndexOutOfBoundsException("bytes2hex: length < 0, length is " + len); } if (off + len > bs.length) { throw new IndexOutOfBoundsException("bytes2hex: offset + length > array length."); } byte b; int r = off, w = 0; char[] cs = new char[len * 2]; for (int i = 0; i < len; i++) { b = bs[r++]; cs[w++] = BASE16[b >> 4 & MASK4]; cs[w++] = BASE16[b & MASK4]; } return new String(cs); }
3.68
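Expected output of the encoder above; this sketch inlines hypothetical copies of the BASE16 table and the low-nibble MASK4 constant:

    public class Bytes2HexDemo {
        private static final char[] BASE16 = "0123456789abcdef".toCharArray();
        private static final int MASK4 = 0x0f;

        static String bytes2hex(byte[] bs, int off, int len) {
            char[] cs = new char[len * 2];
            int w = 0;
            for (int i = 0; i < len; i++) {
                byte b = bs[off + i];
                cs[w++] = BASE16[b >> 4 & MASK4]; // high nibble
                cs[w++] = BASE16[b & MASK4];      // low nibble
            }
            return new String(cs);
        }

        public static void main(String[] args) {
            // Prints "cafe": 0xCA -> 'c','a' and 0xFE -> 'f','e'.
            System.out.println(bytes2hex(new byte[]{(byte) 0xCA, (byte) 0xFE}, 0, 2));
        }
    }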
flink_DataSink_sortLocalOutput
/** * Sorts each local partition of a data set on the field(s) specified by the field expression in * the specified {@link Order} before it is emitted by the output format. * * <p><b>Note: Non-composite types can only be sorted on the full element which is specified by * a wildcard expression ("*" or "_").</b> * * <p>Data sets of composite types (Tuple or Pojo) can be sorted on multiple fields in different * orders by chaining {@link #sortLocalOutput(String, Order)} calls. * * @param fieldExpression The field expression for the field(s) on which the data set is locally * sorted. * @param order The Order in which the specified field(s) are locally sorted. * @return This data sink operator with specified output order. * @see Order * @deprecated Use {@link DataSet#sortPartition(String, Order)} instead */ @Deprecated @PublicEvolving public DataSink<T> sortLocalOutput(String fieldExpression, Order order) { int numFields; int[] fields; Order[] orders; // compute flat field positions for (nested) sorting fields Keys.ExpressionKeys<T> ek = new Keys.ExpressionKeys<>(fieldExpression, this.type); fields = ek.computeLogicalKeyPositions(); if (!Keys.ExpressionKeys.isSortKey(fieldExpression, this.type)) { throw new InvalidProgramException("Selected sort key is not a sortable type"); } numFields = fields.length; orders = new Order[numFields]; Arrays.fill(orders, order); if (this.sortKeyPositions == null) { // set sorting info this.sortKeyPositions = fields; this.sortOrders = orders; } else { // append sorting info to existing info int oldLength = this.sortKeyPositions.length; int newLength = oldLength + numFields; this.sortKeyPositions = Arrays.copyOf(this.sortKeyPositions, newLength); this.sortOrders = Arrays.copyOf(this.sortOrders, newLength); for (int i = 0; i < numFields; i++) { this.sortKeyPositions[oldLength + i] = fields[i]; this.sortOrders[oldLength + i] = orders[i]; } } return this; }
3.68
AreaShop_GithubUpdateCheck_getCurrentVersion
/** * Get the current version. * @return Current version of the plugin */ public String getCurrentVersion() { return currentVersion; }
3.68
hbase_ByteBuffInputStream_read
/** * Reads up to next <code>len</code> bytes of data from buffer into passed array(starting from * given offset). * @param b the array into which the data is read. * @param off the start offset in the destination array <code>b</code> * @param len the maximum number of bytes to read. * @return the total number of bytes actually read into the buffer, or <code>-1</code> if not even * 1 byte can be read because the end of the stream has been reached. */ @Override public int read(byte b[], int off, int len) { int avail = available(); if (avail <= 0) { return -1; } if (len <= 0) { return 0; } if (len > avail) { len = avail; } this.buf.get(b, off, len); return len; }
3.68
hbase_SnapshotDescriptionUtils_isSnapshotOwner
/**
 * Check if the user is this table snapshot's owner
 * @param snapshot the table snapshot description
 * @param user the user
 * @return true if the user is the owner of the snapshot, false otherwise or if the snapshot
 *         owner field is not present
 */
public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDescription snapshot,
  User user) {
  if (user == null) return false;
  return user.getShortName().equals(snapshot.getOwner());
}
3.68
framework_VAbstractPopupCalendar_setRangeStart
/** * Sets the start range for this component. The start range is inclusive, * and it depends on the current resolution, what is considered inside the * range. * * @param rangeStart * - the allowed range's start date */ public void setRangeStart(String rangeStart) { calendar.setRangeStart(rangeStart); }
3.68
morf_XmlPullProcessor_readTag
/** * Reads the next tag from the pull parser and throws an exception if its name does not * match <var>expectedTagName</var>. * * @param xmlStreamReader The pull parser to read from * @param expectedTagName The tag name expected */ public static void readTag(XMLStreamReader xmlStreamReader, String expectedTagName) { // Look for any start tag event int event; try { do { event = xmlStreamReader.next(); } while (event == XMLStreamReader.CHARACTERS || event == XMLStreamReader.END_ELEMENT || event == XMLStreamReader.COMMENT); } catch (Exception e) { throw new RuntimeException("Error reading data from the XML pull parser", e); } if (event == XMLStreamReader.START_ELEMENT) { if (!expectedTagName.equals(xmlStreamReader.getLocalName())) { throw new IllegalArgumentException("Expected tag [" + expectedTagName + "] but got [" + xmlStreamReader.getLocalName() + "]"); } } else if (event == XMLStreamReader.END_DOCUMENT) { throw new IllegalStateException("Unexpected end of document while looking for tag [" + expectedTagName + "]"); } else { throw new IllegalStateException("Expecting a tag but found [" + event + "]"); } }
3.68
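A usage sketch with the standard StAX API; the XML payload is made up and the XmlPullProcessor class above is assumed to be on the classpath:

    import java.io.StringReader;
    import javax.xml.stream.XMLInputFactory;
    import javax.xml.stream.XMLStreamReader;

    public class ReadTagDemo {
        public static void main(String[] args) throws Exception {
            XMLStreamReader reader = XMLInputFactory.newInstance()
                .createXMLStreamReader(new StringReader("<table><column/></table>"));
            // Advances to <table>; throws if the next start tag has another name.
            XmlPullProcessor.readTag(reader, "table");
            XmlPullProcessor.readTag(reader, "column");
        }
    }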
hudi_HoodieAsyncService_waitTillPendingAsyncServiceInstantsReducesTo
/**
 * Wait till the number of outstanding pending compaction/clustering operations reduces to the
 * passed-in value.
 *
 * @param numPending Maximum pending compactions/clustering allowed
 * @throws InterruptedException if the waiting thread is interrupted
 */
public void waitTillPendingAsyncServiceInstantsReducesTo(int numPending) throws InterruptedException {
  try {
    queueLock.lock();
    while (!isShutdown() && !hasError() && (pendingInstants.size() > numPending)) {
      consumed.await(POLLING_SECONDS, TimeUnit.SECONDS);
    }
  } finally {
    queueLock.unlock();
  }
}
3.68
flink_LongValue_toString
/* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { return String.valueOf(this.value); }
3.68
flink_Predicates_exactlyOneOf
/** * Returns a {@link DescribedPredicate} that returns true if one and only one of the given * predicates match. */ @SafeVarargs public static <T> DescribedPredicate<T> exactlyOneOf( final DescribedPredicate<? super T>... other) { return DescribedPredicate.describe( "only one of the following predicates match:\n" + Arrays.stream(other) .map(dp -> "* " + dp + "\n") .collect(Collectors.joining()), t -> Arrays.stream(other).map(dp -> dp.test(t)).reduce(false, Boolean::logicalXor)); }
3.68
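The XOR-reduce at the heart of exactlyOneOf is plain Java; a dependency-free sketch of the same combinator over java.util.function.Predicate (note that XOR is really an odd-count test, which coincides with "exactly one" when at most two predicates can match):

    import java.util.Arrays;
    import java.util.function.Predicate;

    public class ExactlyOneOfDemo {
        @SafeVarargs
        static <T> Predicate<T> exactlyOneOf(Predicate<? super T>... others) {
            // XOR over booleans is true iff an odd number of predicates match.
            return t -> Arrays.stream(others)
                .map(p -> p.test(t))
                .reduce(false, Boolean::logicalXor);
        }

        public static void main(String[] args) {
            Predicate<String> startsWithA = s -> s.startsWith("a");
            Predicate<String> longerThan3 = s -> s.length() > 3;
            Predicate<String> p = exactlyOneOf(startsWithA, longerThan3);
            System.out.println(p.test("ab"));   // true  (only the first matches)
            System.out.println(p.test("abcd")); // false (both match)
        }
    }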
framework_WindowElement_move
/** * Moves the window by given offset. * * @param xOffset * x offset * @param yOffset * y offset */ public void move(int xOffset, int yOffset) { Actions action = new Actions(getDriver()); action.moveToElement( findElement(org.openqa.selenium.By.className("v-window-wrap")), 5, 5); action.clickAndHold(); action.moveByOffset(xOffset, yOffset); action.release(); action.build().perform(); }
3.68
framework_Upload_getMIMEType
/** * Gets the MIME Type of the file. * * @return the MIME type. */ public String getMIMEType() { return type; }
3.68
flink_LeaderRetriever_getLeaderFuture
/** Returns the current leader future as a (leader address, leader session ID) tuple. */
public CompletableFuture<Tuple2<String, UUID>> getLeaderFuture() {
    return atomicLeaderFuture.get();
}
3.68
flink_LocalFileSystem_delete
/** * Deletes the given file or directory. * * @param f the file to be deleted * @return <code>true</code> if all files were deleted successfully, <code>false</code> * otherwise * @throws IOException thrown if an error occurred while deleting the files/directories */ private boolean delete(final File f) throws IOException { if (f.isDirectory()) { final File[] files = f.listFiles(); if (files != null) { for (File file : files) { final boolean del = delete(file); if (!del) { return false; } } } } else { return f.delete(); } // Now directory is empty return f.delete(); }
3.68
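The recursion above is the classic java.io.File delete-walk; a standalone sketch of the same logic under a hypothetical class name:

    import java.io.File;

    public class RecursiveDeleteDemo {
        // Deletes f and everything below it; returns false on the first failure.
        static boolean delete(File f) {
            if (f.isDirectory()) {
                File[] files = f.listFiles(); // null if f vanished or is unreadable
                if (files != null) {
                    for (File file : files) {
                        if (!delete(file)) {
                            return false;
                        }
                    }
                }
            }
            return f.delete(); // deletes the now-empty directory, or the file itself
        }
    }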
hadoop_CommitContext_isCollectIOStatistics
/** * Collecting thread level IO statistics? * @return true if thread level IO stats should be collected. */ public boolean isCollectIOStatistics() { return collectIOStatistics; }
3.68
hadoop_RolePolicies_bucketObjectsToArn
/** * From an S3 bucket name, build an ARN to refer to all objects in * it. * @param bucket bucket name. * @return return the ARN to use in statements. */ public static String bucketObjectsToArn(String bucket) { return String.format("arn:aws:s3:::%s/*", bucket); }
3.68
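For reference, an illustrative fragment showing the ARN the helper above produces:

    String arn = String.format("arn:aws:s3:::%s/*", "my-bucket");
    System.out.println(arn); // arn:aws:s3:::my-bucket/*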
hadoop_FederationStateStoreFacade_getSubClusters
/** * Updates the cache with the central {@link FederationStateStore} and returns * the {@link SubClusterInfo} of all active sub cluster(s). * * @param filterInactiveSubClusters whether to filter out inactive * sub-clusters * @param flushCache flag to indicate if the cache should be flushed or not * @return the sub cluster information * @throws YarnException if the call to the state store is unsuccessful */ public Map<SubClusterId, SubClusterInfo> getSubClusters( final boolean filterInactiveSubClusters, final boolean flushCache) throws YarnException { if (flushCache && federationCache.isCachingEnabled()) { LOG.info("Flushing subClusters from cache and rehydrating from store."); federationCache.removeSubCluster(flushCache); } return getSubClusters(filterInactiveSubClusters); }
3.68
hadoop_SelectBinding_buildCsvInput
/** * Build the CSV input format for a request. * @param ownerConf FS owner configuration * @param builderOptions options on the specific request * @return the input format * @throws IllegalArgumentException argument failure * @throws IOException validation failure */ public InputSerialization buildCsvInput( final Configuration ownerConf, final Configuration builderOptions) throws IllegalArgumentException, IOException { String headerInfo = opt(builderOptions, ownerConf, CSV_INPUT_HEADER, CSV_INPUT_HEADER_OPT_DEFAULT, true).toUpperCase(Locale.ENGLISH); String commentMarker = xopt(builderOptions, ownerConf, CSV_INPUT_COMMENT_MARKER, CSV_INPUT_COMMENT_MARKER_DEFAULT); String fieldDelimiter = xopt(builderOptions, ownerConf, CSV_INPUT_INPUT_FIELD_DELIMITER, CSV_INPUT_FIELD_DELIMITER_DEFAULT); String recordDelimiter = xopt(builderOptions, ownerConf, CSV_INPUT_RECORD_DELIMITER, CSV_INPUT_RECORD_DELIMITER_DEFAULT); String quoteCharacter = xopt(builderOptions, ownerConf, CSV_INPUT_QUOTE_CHARACTER, CSV_INPUT_QUOTE_CHARACTER_DEFAULT); String quoteEscapeCharacter = xopt(builderOptions, ownerConf, CSV_INPUT_QUOTE_ESCAPE_CHARACTER, CSV_INPUT_QUOTE_ESCAPE_CHARACTER_DEFAULT); // CSV input CSVInput.Builder csvBuilder = CSVInput.builder() .fieldDelimiter(fieldDelimiter) .recordDelimiter(recordDelimiter) .comments(commentMarker) .quoteCharacter(quoteCharacter); if (StringUtils.isNotEmpty(quoteEscapeCharacter)) { csvBuilder.quoteEscapeCharacter(quoteEscapeCharacter); } csvBuilder.fileHeaderInfo(headerInfo); InputSerialization.Builder inputSerialization = InputSerialization.builder() .csv(csvBuilder.build()); String compression = opt(builderOptions, ownerConf, SELECT_INPUT_COMPRESSION, COMPRESSION_OPT_NONE, true).toUpperCase(Locale.ENGLISH); if (isNotEmpty(compression)) { inputSerialization.compressionType(compression); } return inputSerialization.build(); }
3.68
framework_Calendar_initCalendarWithLocale
/** * Initialize the java calendar instance with the current locale and * timezone. */ private void initCalendarWithLocale() { if (timezone != null) { currentCalendar = java.util.Calendar.getInstance(timezone, getLocale()); } else { currentCalendar = java.util.Calendar.getInstance(getLocale()); } if (customFirstDayOfWeek != null) { currentCalendar.setFirstDayOfWeek(customFirstDayOfWeek); } }
3.68
hadoop_ContentCounts_getLength
// Get the total of file length in bytes. public long getLength() { return contents.get(Content.LENGTH); }
3.68
hbase_VisibilityUtils_extractVisibilityTags
/** * Extract the visibility tags of the given Cell into the given List * @param cell - the cell * @param tags - the array that will be populated if visibility tags are present * @return The visibility tags serialization format */ public static Byte extractVisibilityTags(Cell cell, List<Tag> tags) { Byte serializationFormat = null; Iterator<Tag> tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) { serializationFormat = Tag.getValueAsByte(tag); } else if (tag.getType() == VISIBILITY_TAG_TYPE) { tags.add(tag); } } return serializationFormat; }
3.68
hbase_ColumnFamilyDescriptorBuilder_setInMemory
/** * Set the inMemory flag * @param inMemory True if we are to favor keeping all values for this column family in the * HRegionServer cache * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) { return setValue(IN_MEMORY_BYTES, Boolean.toString(inMemory)); }
3.68
graphhopper_GraphHopper_setPreciseIndexResolution
/** * Precise location resolution index means also more space (disc/RAM) could be consumed and * probably slower query times, which would be e.g. not suitable for Android. The resolution * specifies the tile width (in meter). */ public GraphHopper setPreciseIndexResolution(int precision) { ensureNotLoaded(); preciseIndexResolution = precision; return this; }
3.68
hadoop_ClasspathConstructor_buildLibDir
/**
 * Build a lib dir path.
 * @param pathToLibDir path to the directory; may or may not end with a
 * trailing path separator
 * @return a path to a lib dir that is compatible with the java classpath
 */
public String buildLibDir(String pathToLibDir) {
  String dir = appendDirectoryTerminator(pathToLibDir);
  dir += "*";
  return dir;
}
3.68