Dataset schema: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68).
flink_Preconditions_format
/** * A simplified formatting method. Similar to {@link String#format(String, Object...)}, but with * lower overhead (only String parameters, no locale, no format validation). * * <p>This method is taken quasi verbatim from the Guava Preconditions class. */ private static String format(@Nullable String template, @Nullable Object... args) { final int numArgs = args == null ? 0 : args.length; template = String.valueOf(template); // null -> "null" // start substituting the arguments into the '%s' placeholders StringBuilder builder = new StringBuilder(template.length() + 16 * numArgs); int templateStart = 0; int i = 0; while (i < numArgs) { int placeholderStart = template.indexOf("%s", templateStart); if (placeholderStart == -1) { break; } builder.append(template.substring(templateStart, placeholderStart)); builder.append(args[i++]); templateStart = placeholderStart + 2; } builder.append(template.substring(templateStart)); // if we run out of placeholders, append the extra args in square braces if (i < numArgs) { builder.append(" ["); builder.append(args[i++]); while (i < numArgs) { builder.append(", "); builder.append(args[i++]); } builder.append(']'); } return builder.toString(); }
3.68
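Since the method above is private in Flink's Preconditions, here is a minimal standalone sketch of the same %s substitution rules (the FormatDemo class and its main method are illustrative, not Flink code); extra arguments beyond the placeholders end up in square brackets:

public final class FormatDemo {

    // Same substitution rules as the snippet above: fill "%s" placeholders left
    // to right, then append any surplus arguments in square brackets.
    static String format(String template, Object... args) {
        template = String.valueOf(template); // null -> "null"
        StringBuilder builder = new StringBuilder(template.length() + 16 * args.length);
        int templateStart = 0;
        int i = 0;
        while (i < args.length) {
            int placeholderStart = template.indexOf("%s", templateStart);
            if (placeholderStart == -1) {
                break;
            }
            builder.append(template, templateStart, placeholderStart).append(args[i++]);
            templateStart = placeholderStart + 2;
        }
        builder.append(template.substring(templateStart));
        if (i < args.length) { // ran out of placeholders
            builder.append(" [").append(args[i++]);
            while (i < args.length) {
                builder.append(", ").append(args[i++]);
            }
            builder.append(']');
        }
        return builder.toString();
    }

    public static void main(String[] args) {
        System.out.println(format("found %s of %s", 2, 3)); // found 2 of 3
        System.out.println(format("found %s", 2, 3, 4));    // found 2 [3, 4]
    }
}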
flink_TimeWindow_getWindowStartWithOffset
/** * Method to get the window start for a timestamp. * * @param timestamp epoch millisecond to get the window start. * @param offset The offset which window start would be shifted by. * @param windowSize The size of the generated windows. * @return window start */ public static long getWindowStartWithOffset(long timestamp, long offset, long windowSize) { final long remainder = (timestamp - offset) % windowSize; // handle both positive and negative cases if (remainder < 0) { return timestamp - (remainder + windowSize); } else { return timestamp - remainder; } }
3.68
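A self-contained sketch of the window-start arithmetic above (WindowStartDemo is a hypothetical class, not Flink code), showing that the remainder correction also aligns timestamps that precede the epoch:

public final class WindowStartDemo {

    static long windowStart(long timestamp, long offset, long windowSize) {
        long remainder = (timestamp - offset) % windowSize;
        // Java's % can be negative, so shift one extra window for pre-epoch timestamps.
        return remainder < 0 ? timestamp - (remainder + windowSize) : timestamp - remainder;
    }

    public static void main(String[] args) {
        System.out.println(windowStart(17L, 0L, 5L)); // 15
        System.out.println(windowStart(-1L, 0L, 5L)); // -5
        System.out.println(windowStart(17L, 2L, 5L)); // 17 (windows start at ..., 12, 17, 22)
    }
}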
dubbo_AbstractRegistryFactory_createRegistryCacheKey
/** * Create the key for the registries cache. * This method may be overridden by the sub-class. * * @param url the registration {@link URL url} * @return non-null */ protected String createRegistryCacheKey(URL url) { return url.toServiceStringWithoutResolving(); }
3.68
flink_BufferConsumer_isDataAvailable
/** Returns true if there is new data available for reading. */ public boolean isDataAvailable() { return currentReaderPosition < writerPosition.getLatest(); }
3.68
framework_VColorPickerArea_refreshColor
/** * Update the color area with the currently set color. */ public void refreshColor() { if (color != null) { // Set the color area.getElement().getStyle().setProperty("background", color); } }
3.68
hadoop_YarnRegistryViewForProviders_registerSelf
/** * Add a service under a path for the current user. * @param record service record * @param deleteTreeFirst perform recursive delete of the path first * @return the path the service was created at * @throws IOException */ public String registerSelf( ServiceRecord record, boolean deleteTreeFirst) throws IOException { selfRegistrationPath = putService(user, serviceClass, instanceName, record, deleteTreeFirst); setSelfRegistration(record); return selfRegistrationPath; }
3.68
flink_TimeWindowUtil_toEpochMills
/** * Convert a timestamp in mills with the given timezone to epoch mills. * * @param utcTimestampMills the timestamp in mills to convert, expressed in UTC. * @param shiftTimeZone the timezone that the given timestamp mills has been shifted. * @return the epoch mills. */ public static long toEpochMills(long utcTimestampMills, ZoneId shiftTimeZone) { // Long.MAX_VALUE is a flag of max watermark, directly return it if (UTC_ZONE_ID.equals(shiftTimeZone) || Long.MAX_VALUE == utcTimestampMills) { return utcTimestampMills; } LocalDateTime utcTimestamp = LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMills), UTC_ZONE_ID); return utcTimestamp.atZone(shiftTimeZone).toInstant().toEpochMilli(); }
3.68
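A self-contained sketch of the same conversion using only java.time (the ShiftDemo class is hypothetical, and ZoneOffset.UTC stands in for Flink's UTC_ZONE_ID constant):

import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;

public final class ShiftDemo {

    // Re-interpret a UTC wall-clock timestamp as wall-clock time in shiftTimeZone.
    static long toEpochMillis(long utcTimestampMillis, ZoneId shiftTimeZone) {
        if (ZoneOffset.UTC.equals(shiftTimeZone) || utcTimestampMillis == Long.MAX_VALUE) {
            return utcTimestampMillis;
        }
        LocalDateTime local =
                LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMillis), ZoneOffset.UTC);
        return local.atZone(shiftTimeZone).toInstant().toEpochMilli();
    }

    public static void main(String[] args) {
        // 2024-01-01T00:00 read as Shanghai wall-clock time is 8 hours earlier in epoch terms.
        long utc = 1704067200000L; // 2024-01-01T00:00:00Z
        System.out.println(toEpochMillis(utc, ZoneId.of("Asia/Shanghai"))); // 1704038400000
    }
}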
flink_JoinOperator_projectTuple8
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7> ProjectJoin<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes); return new ProjectJoin<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hbase_MultiTableSnapshotInputFormatImpl_getSnapshotsToScans
/** * Retrieve the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration by * {@link #setSnapshotToScans(Configuration, Map)} * @param conf Configuration to extract name -&gt; list&lt;scan&gt; mappings from. * @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration */ public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException { Map<String, Collection<Scan>> rtn = Maps.newHashMap(); for (Map.Entry<String, String> entry : ConfigurationUtil.getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) { String snapshotName = entry.getKey(); String scan = entry.getValue(); Collection<Scan> snapshotScans = rtn.get(snapshotName); if (snapshotScans == null) { snapshotScans = Lists.newArrayList(); rtn.put(snapshotName, snapshotScans); } snapshotScans.add(TableMapReduceUtil.convertStringToScan(scan)); } return rtn; }
3.68
flink_ExceptionUtils_findThrowable
/** * Checks whether a throwable chain contains an exception matching a predicate and returns it. * * @param throwable the throwable chain to check. * @param predicate the predicate of the exception to search for in the chain. * @return Optional throwable of the requested type if available, otherwise empty */ public static Optional<Throwable> findThrowable( Throwable throwable, Predicate<Throwable> predicate) { if (throwable == null || predicate == null) { return Optional.empty(); } Throwable t = throwable; while (t != null) { if (predicate.test(t)) { return Optional.of(t); } else { t = t.getCause(); } } return Optional.empty(); }
3.68
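A usage sketch for the cause-chain search above; the walk is copied into a standalone class (FindThrowableDemo, hypothetical) so the example runs without Flink on the classpath:

import java.io.IOException;
import java.util.Optional;
import java.util.function.Predicate;

public final class FindThrowableDemo {

    // Same cause-chain walk as the snippet above, restated for a runnable example.
    static Optional<Throwable> findThrowable(Throwable throwable, Predicate<Throwable> predicate) {
        if (throwable == null || predicate == null) {
            return Optional.empty();
        }
        for (Throwable t = throwable; t != null; t = t.getCause()) {
            if (predicate.test(t)) {
                return Optional.of(t);
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Throwable chain = new RuntimeException("wrapper", new IOException("disk full"));
        findThrowable(chain, e -> e instanceof IOException)
                .ifPresent(e -> System.out.println("found: " + e.getMessage())); // found: disk full
    }
}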
framework_TestBench_getTestableClassesForPackage
/** * Return all testable classes within the given package. A class is considered * testable if its superclass is VaadinSession or CustomComponent. * * @param packageName the package to scan for testable classes * @return the testable classes found in the package * @throws Exception if the package does not appear to be valid */ public static List<Class<?>> getTestableClassesForPackage( String packageName) throws Exception { final List<File> directories = new ArrayList<>(); try { final ClassLoader cld = Thread.currentThread() .getContextClassLoader(); if (cld == null) { throw new ClassNotFoundException("Can't get class loader."); } final String path = packageName.replace('.', '/'); // Ask for all resources for the path final Enumeration<URL> resources = cld.getResources(path); while (resources.hasMoreElements()) { final URL url = resources.nextElement(); directories.add(new File(url.getFile())); } } catch (final Exception x) { throw new Exception( packageName + " does not appear to be a valid package."); } final List<Class<?>> classes = new ArrayList<>(); // For every directory identified capture all the .class files for (final File directory : directories) { if (directory.exists()) { // Get the list of the files contained in the package final String[] files = directory.list(); for (int j = 0; j < files.length; j++) { // we are only interested in .class files if (files[j].endsWith(".class")) { // removes the .class extension final String p = packageName + '.' + files[j].substring(0, files[j].length() - 6); final Class<?> c = Class.forName(p); if (c.getSuperclass() != null) { if ((c.getSuperclass().equals( com.vaadin.server.VaadinSession.class))) { classes.add(c); } else if ((c.getSuperclass().equals( com.vaadin.ui.CustomComponent.class))) { classes.add(c); } } // for (Class cc : c.getInterfaces()) { // if (cc.equals(Testable.class)) { // // Class is testable // classes.add(c); // } // } } } } else { throw new ClassNotFoundException( packageName + " (" + directory.getPath() + ") does not appear to be a valid package"); } } return classes; }
3.68
graphhopper_WayToEdgesMap_reserve
/** * We need to reserve a way before we can put the associated edges into the map. * This way we can define a set of keys/ways for which we shall add edges later. */ public void reserve(long way) { offsetIndexByWay.put(way, RESERVED); }
3.68
framework_SelectItemCaptionRefresh_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 9250; }
3.68
dubbo_InjvmExporterListener_exported
/** * Overrides the exported method to add the given exporter to the exporters ConcurrentHashMap, * <p> * and to notify all registered ExporterChangeListeners of the export event. * * @param exporter The Exporter instance that has been exported. * @throws RpcException If there is an error during the export process. */ @Override public void exported(Exporter<?> exporter) throws RpcException { String serviceKey = exporter.getInvoker().getUrl().getServiceKey(); exporters.putIfAbsent(serviceKey, exporter); Set<ExporterChangeListener> listeners = exporterChangeListeners.get(serviceKey); if (!CollectionUtils.isEmpty(listeners)) { for (ExporterChangeListener listener : listeners) { listener.onExporterChangeExport(exporter); } } super.exported(exporter); }
3.68
hbase_HBaseTestingUtility_loadTable
/** * Load table of multiple column families with rows from 'aaa' to 'zzz'. * @param t Table * @param f Array of Families to load * @param value the values of the cells. If null is passed, the row key is used as value * @param writeToWAL whether the puts should be written to the WAL; if false the WAL is skipped * @return Count of rows loaded. */ public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException { List<Put> puts = new ArrayList<>(); for (byte[] row : HBaseTestingUtility.ROWS) { Put put = new Put(row); put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL); for (int i = 0; i < f.length; i++) { byte[] value1 = value != null ? value : row; put.addColumn(f[i], f[i], value1); } puts.add(put); } t.put(puts); return puts.size(); }
3.68
hbase_OrderedBytes_isBlobCopy
/** * Return true when the next encoded value in {@code src} uses BlobCopy encoding, false otherwise. */ public static boolean isBlobCopy(PositionedByteRange src) { return BLOB_COPY == (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek()); }
3.68
querydsl_BeanMap_setValue
/** * Sets the value. * * @param value the new value for the entry * @return the old value for the entry */ @Override public Object setValue(Object value) { String key = getKey(); Object oldValue = owner.get(key); owner.put(key, value); Object newValue = owner.get(key); this.value = newValue; return oldValue; }
3.68
streampipes_AbstractProcessingElementBuilder_setStream1
/** * @deprecated Use {@link #requiredStream(CollectedStreamRequirements)} instead */ @Deprecated(since = "0.90.0", forRemoval = true) public K setStream1() { stream1 = true; return me(); }
3.68
querydsl_Expressions_setPath
/** * Create a new Path expression * * @param type element type * @param queryType element expression type * @param metadata path metadata * @param <E> element type * @param <Q> element expression type * @return path expression */ public static <E, Q extends SimpleExpression<? super E>> SetPath<E, Q> setPath(Class<E> type, Class<Q> queryType, PathMetadata metadata) { return new SetPath<E, Q>(type, queryType, metadata); }
3.68
hudi_OrcUtils_fetchRecordKeysWithPositions
/** * Fetch {@link HoodieKey}s from the given ORC file. * * @param filePath The ORC file path. * @param configuration configuration to build fs object * @return {@link List} of {@link HoodieKey}s fetched from the ORC file */ @Override public List<Pair<HoodieKey, Long>> fetchRecordKeysWithPositions(Configuration configuration, Path filePath) { try { if (!filePath.getFileSystem(configuration).exists(filePath)) { return Collections.emptyList(); } } catch (IOException e) { throw new HoodieIOException("Failed to read from ORC file:" + filePath, e); } List<Pair<HoodieKey, Long>> hoodieKeysAndPositions = new ArrayList<>(); long position = 0; try (ClosableIterator<HoodieKey> iterator = getHoodieKeyIterator(configuration, filePath, Option.empty())) { while (iterator.hasNext()) { hoodieKeysAndPositions.add(Pair.of(iterator.next(), position)); position++; } } return hoodieKeysAndPositions; }
3.68
pulsar_LoadSimulationController_handleGroupTrade
// Handle the command line arguments associated with the group trade command. private void handleGroupTrade(final ShellArguments arguments) throws Exception { final List<String> commandArguments = arguments.commandArguments; // Group trade expects 3 application arguments: tenant name, group name, // and number of namespaces. if (checkAppArgs(commandArguments.size() - 1, 3)) { final String tenant = commandArguments.get(1); final String group = commandArguments.get(2); final int numNamespaces = Integer.parseInt(commandArguments.get(3)); for (int i = 0; i < numNamespaces; ++i) { for (int j = 0; j < arguments.topicsPerNamespace; ++j) { // For each namespace and topic pair, create the namespace // by using the group name and the // namespace index, and then create the topic by using the // topic index. Then just call trade. final String topic = makeTopic(tenant, String.format("%s-%d", group, i), Integer.toString(j)); trade(arguments, topic, random.nextInt(clients.length)); Thread.sleep(arguments.separation); } } } }
3.68
hadoop_DataNodeFaultInjector_badDecoding
/** * Used as a hook to inject data pollution * into an erasure coding reconstruction. */ public void badDecoding(ByteBuffer[] outputs) {}
3.68
hbase_HBaseTestingUtility_countRows
/** * Return the number of rows in the given table. */ public int countRows(final TableName tableName) throws IOException { Table table = getConnection().getTable(tableName); try { return countRows(table); } finally { table.close(); } }
3.68
graphhopper_VectorTile_hasUintValue
/** * <code>optional uint64 uint_value = 5;</code> */ public boolean hasUintValue() { return ((bitField0_ & 0x00000010) == 0x00000010); }
3.68
framework_BackEndDataProvider_setSortOrders
/** * Sets the sort order to use, given a {@link QuerySortOrderBuilder}. * Shorthand for {@code setSortOrders(builder.build())}. * * @see QuerySortOrderBuilder * * @param builder * the sort builder to retrieve the sort order from * @throws NullPointerException * if builder is null */ default void setSortOrders(QuerySortOrderBuilder builder) { Objects.requireNonNull(builder, "Sort builder cannot be null."); setSortOrders(builder.build()); }
3.68
hbase_BackupManager_readBackupStartCode
/** * Read the backup start code (timestamp) of the last successful backup. Will return null if * there is no start code stored in the backup system table or the value is of length 0. These two * cases indicate there is no successful backup completed so far. * @return the timestamp of the last successful backup * @throws IOException exception */ public String readBackupStartCode() throws IOException { return systemTable.readBackupStartCode(backupInfo.getBackupRootDir()); }
3.68
flink_CheckpointedInputGate_getNumberOfInputChannels
/** @return number of underlying input channels. */ public int getNumberOfInputChannels() { return inputGate.getNumberOfInputChannels(); }
3.68
hadoop_TimelineStateStore_serviceStop
/** * Shutdown the state storage. * * @throws IOException */ @Override public void serviceStop() throws IOException { closeStorage(); }
3.68
hadoop_FlowRunCoprocessor_preScannerOpen
/* * (non-Javadoc) * * Ensures that max versions are set for the Scan so that metrics can be * correctly aggregated and min/max can be correctly determined. * * @see * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#preScannerOpen(org * .apache.hadoop.hbase.coprocessor.ObserverContext, * org.apache.hadoop.hbase.client.Scan) */ @Override public void preScannerOpen( ObserverContext<RegionCoprocessorEnvironment> e, Scan scan) throws IOException { // set max versions for scan to see all // versions to aggregate for metrics scan.setMaxVersions(); }
3.68
hbase_HRegionFileSystem_getStoreDir
// =========================================================================== // Store/StoreFile Helpers // =========================================================================== /** * Returns the directory path of the specified family * @param familyName Column Family Name * @return {@link Path} to the directory of the specified family */ public Path getStoreDir(final String familyName) { return new Path(this.getRegionDir(), familyName); }
3.68
hadoop_LongLong_toString
/** {@inheritDoc} */ @Override public String toString() { final int remainder = BITS_PER_LONG % 4; return String.format("%x*2^%d + %016x", d1<<remainder, BITS_PER_LONG-remainder, d0); }
3.68
shardingsphere-elasticjob_TaskContext_getTaskName
/** * Get task name. * * @return task name */ public String getTaskName() { return String.join(DELIMITER, metaInfo.toString(), type.toString(), slaveId); }
3.68
framework_AbstractSelect_setItemIcon
/** * Sets the icon for an item. * * @param itemId * the id of the item to be assigned an icon. * @param icon * the icon to use or null. */ public void setItemIcon(Object itemId, Resource icon) { if (itemId != null) { if (icon == null) { itemIcons.remove(itemId); } else { itemIcons.put(itemId, icon); } markAsDirty(); } }
3.68
pulsar_WorkerServiceLoader_load
/** * Load the worker service from the given nar package, falling back to the default * PulsarWorkerService when no package is configured. * * @param wsNarPackage worker service nar package * @param narExtractionDirectory the directory to extract the nar package to * @return the worker service */ static WorkerService load(String wsNarPackage, String narExtractionDirectory) { if (isEmpty(wsNarPackage)) { return new PulsarWorkerService(); } WorkerServiceDefinition definition; try { definition = getWorkerServiceDefinition( wsNarPackage, narExtractionDirectory ); } catch (IOException ioe) { log.error("Failed to get the worker service definition from {}", wsNarPackage, ioe); throw new RuntimeException("Failed to get the worker service definition from " + wsNarPackage, ioe); } WorkerServiceMetadata metadata = new WorkerServiceMetadata(); Path narPath = Paths.get(wsNarPackage); metadata.setArchivePath(narPath); metadata.setDefinition(definition); WorkerServiceWithClassLoader service; try { service = load(metadata, narExtractionDirectory); } catch (IOException e) { log.error("Failed to load the worker service {}", metadata, e); throw new RuntimeException("Failed to load the worker service " + metadata, e); } log.info("Successfully loaded worker service {}", metadata); return service; }
3.68
flink_MemoryManager_getPageSize
/** * Gets the size of the pages handled by the memory manager. * * @return The size of the pages handled by the memory manager. */ public int getPageSize() { return (int) pageSize; }
3.68
zxing_MinimalEncoder_getDataBytes
// Important: The function does not return the length bytes (one or two) in case of B256 encoding byte[] getDataBytes() { switch (mode) { case ASCII: if (input.isECI(fromPosition)) { return getBytes(241,input.getECIValue(fromPosition) + 1); } else if (isExtendedASCII(input.charAt(fromPosition), input.getFNC1Character())) { return getBytes(235,input.charAt(fromPosition) - 127); } else if (characterLength == 2) { return getBytes((input.charAt(fromPosition) - '0') * 10 + input.charAt(fromPosition + 1) - '0' + 130); } else if (input.isFNC1(fromPosition)) { return getBytes(232); } else { return getBytes(input.charAt(fromPosition) + 1); } case B256: return getBytes(input.charAt(fromPosition)); case C40: return getC40Words(true, input.getFNC1Character()); case TEXT: return getC40Words(false, input.getFNC1Character()); case X12: return getX12Words(); case EDF: return getEDFBytes(); } assert false; return new byte[0]; }
3.68
hudi_HoodieMetaSyncOperations_getAllPartitions
/** * Get all partitions for the table in the metastore. */ default List<Partition> getAllPartitions(String tableName) { return Collections.emptyList(); }
3.68
flink_BlobLibraryCacheManager_getNumberOfReferenceHolders
/** * Gets the number of tasks holding {@link ClassLoader} references for the given job. * * @param jobId ID of a job * @return number of reference holders */ int getNumberOfReferenceHolders(JobID jobId) { synchronized (lockObject) { LibraryCacheEntry entry = cacheEntries.get(jobId); return entry == null ? 0 : entry.getReferenceCount(); } }
3.68
hbase_RegionInfo_getTable
/** * Gets the table name from the specified region name. * @param regionName to extract the table name from * @return Table name */ @InterfaceAudience.Private // This method should never be used. It's awful doing parse from bytes. // It is a fallback in case we can't get the tablename any other way. Could try removing it. // Keeping it Audience Private so can remove at later date. static TableName getTable(final byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { offset = i; break; } } if (offset <= 0) { throw new IllegalArgumentException("offset=" + offset); } byte[] buff = new byte[offset]; System.arraycopy(regionName, 0, buff, 0, offset); return TableName.valueOf(buff); }
3.68
flink_TpchResultComparator_round
/** Rounding function defined in TPC-H standard specification v2.18.0 chapter 10. */ private static double round(double x, int m) { if (x < 0) { throw new IllegalArgumentException("x must be non-negative"); } double y = x + 5 * Math.pow(10, -m - 1); double z = y * Math.pow(10, m); double q = Math.floor(z); return q / Math.pow(10, m); }
3.68
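A runnable sketch of the rounding rule above (TpchRoundDemo is hypothetical): add half a unit in the (m+1)-th decimal place, scale by 10^m, truncate with floor, and scale back:

public final class TpchRoundDemo {

    // Rounding rule from the snippet above: half-up at the cut-off digit via
    // an added offset, then truncation.
    static double round(double x, int m) {
        if (x < 0) {
            throw new IllegalArgumentException("x must be non-negative");
        }
        double y = x + 5 * Math.pow(10, -m - 1);
        return Math.floor(y * Math.pow(10, m)) / Math.pow(10, m);
    }

    public static void main(String[] args) {
        System.out.println(round(1.2345, 2));  // 1.23
        System.out.println(round(2.0 / 3, 2)); // 0.67
    }
}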
morf_SqlScriptExecutor_withQueryTimeout
/** * @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.QueryBuilder#withQueryTimeout(int) */ @Override public QueryBuilder withQueryTimeout(int queryTimeout) { this.queryTimeout = Optional.of(queryTimeout); return this; }
3.68
graphhopper_Instruction_getSign
/** * The instruction for the person/driver to execute. */ public int getSign() { return sign; }
3.68
framework_DesignContext_writePackageMappings
/** * Writes the package mappings (prefix -> package name) of this object to * the specified document. * <p> * The prefixes are stored as <meta> tags under <head> in the document. * * @param doc * the Jsoup document tree where the package mappings are written */ public void writePackageMappings(Document doc) { Element head = doc.head(); for (String prefix : getPackagePrefixes()) { // Only store the prefix-name mapping if it is not a default mapping // (such as "vaadin" -> "com.vaadin.ui") if (!VAADIN_PREFIX.equals(prefix) && !VAADIN7_PREFIX.equals(prefix) && !LEGACY_PREFIX.equals(prefix)) { Node newNode = doc.createElement("meta"); newNode.attr("name", "package-mapping"); String prefixToPackageName = prefix + ":" + getPackage(prefix); newNode.attr("content", prefixToPackageName); head.appendChild(newNode); } } }
3.68
flink_PythonEnvUtils_startGatewayServer
/** * Creates a GatewayServer run in a daemon thread. * * @return The created GatewayServer */ static GatewayServer startGatewayServer() throws ExecutionException, InterruptedException { CompletableFuture<GatewayServer> gatewayServerFuture = new CompletableFuture<>(); Thread thread = new Thread( () -> { try (NetUtils.Port port = NetUtils.getAvailablePort()) { int freePort = port.getPort(); GatewayServer server = new GatewayServer.GatewayServerBuilder() .gateway( new Gateway( new ConcurrentHashMap< String, Object>(), new CallbackClient(freePort))) .javaPort(0) .build(); resetCallbackClientExecutorService(server); gatewayServerFuture.complete(server); server.start(true); } catch (Throwable e) { gatewayServerFuture.completeExceptionally(e); } }); thread.setName("py4j-gateway"); thread.setDaemon(true); thread.start(); thread.join(); return gatewayServerFuture.get(); }
3.68
framework_ApplicationConfiguration_isDebugMode
/** * Checks if client side is in debug mode. Practically this is invoked by * adding the ?debug parameter to the URI. Please note that debug mode is always * disabled if production mode is enabled, but disabling production mode * does not automatically enable debug mode. * * @see #isProductionMode() * * @return true if the client side is currently being debugged */ public static boolean isDebugMode() { return isDebugAvailable() && Window.Location.getParameter("debug") != null; }
3.68
hibernate-validator_ISBNValidator_checkChecksumISBN10
/** * Check the digits for ISBN 10 using algorithm from * <a href="https://en.wikipedia.org/wiki/International_Standard_Book_Number#ISBN-10_check_digits">Wikipedia</a>. */ private static boolean checkChecksumISBN10(String isbn) { int sum = 0; for ( int i = 0; i < isbn.length() - 1; i++ ) { sum += ( isbn.charAt( i ) - '0' ) * ( 10 - i ); } sum += isbn.charAt( 9 ) == 'X' ? 10 : isbn.charAt( 9 ) - '0'; return ( sum % 11 ) == 0; }
3.68
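A self-contained sketch of the ISBN-10 weighted-sum check above (Isbn10Demo is hypothetical); digit i (0-based) is weighted 10 - i, a trailing 'X' counts as 10, and a valid number sums to a multiple of 11:

public final class Isbn10Demo {

    // Same weighted-sum check as the snippet above, for 10-character inputs.
    static boolean checkChecksumIsbn10(String isbn) {
        int sum = 0;
        for (int i = 0; i < isbn.length() - 1; i++) {
            sum += (isbn.charAt(i) - '0') * (10 - i);
        }
        sum += isbn.charAt(9) == 'X' ? 10 : isbn.charAt(9) - '0';
        return sum % 11 == 0;
    }

    public static void main(String[] args) {
        System.out.println(checkChecksumIsbn10("0306406152")); // true
        System.out.println(checkChecksumIsbn10("0306406153")); // false
    }
}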
flink_TypeExtractionUtils_sameTypeVars
/** Checks whether two types are type variables describing the same. */ public static boolean sameTypeVars(Type t1, Type t2) { return t1 instanceof TypeVariable && t2 instanceof TypeVariable && ((TypeVariable<?>) t1).getName().equals(((TypeVariable<?>) t2).getName()) && ((TypeVariable<?>) t1) .getGenericDeclaration() .equals(((TypeVariable<?>) t2).getGenericDeclaration()); }
3.68
morf_AbstractSqlDialectTest_expectedUsesNVARCHARforStrings
/** * Override to set the expected NVARCHAR behaviour. * @return whether to use NVARCHAR for strings or not */ protected boolean expectedUsesNVARCHARforStrings() { return false; }
3.68
flink_ExtractionUtils_extractConstructorParameterNames
/** * Extracts ordered parameter names from a constructor that takes all of the given fields with * matching (possibly primitive and lenient) type and name. */ private static @Nullable List<String> extractConstructorParameterNames( Constructor<?> constructor, List<Field> fields) { final Type[] parameterTypes = constructor.getGenericParameterTypes(); List<String> parameterNames = extractExecutableNames(constructor); if (parameterNames == null) { return null; } final Map<String, Field> fieldMap = fields.stream() .collect( Collectors.toMap( f -> normalizeAccessorName(f.getName()), Function.identity())); // check that all fields are represented in the parameters of the constructor final List<String> fieldNames = new ArrayList<>(); for (int i = 0; i < parameterNames.size(); i++) { final String parameterName = normalizeAccessorName(parameterNames.get(i)); final Field field = fieldMap.get(parameterName); if (field == null) { return null; } final Type fieldType = field.getGenericType(); final Type parameterType = parameterTypes[i]; // we are tolerant here because frameworks such as Avro accept a boxed type even though // the field is primitive if (!primitiveToWrapper(parameterType).equals(primitiveToWrapper(fieldType))) { return null; } fieldNames.add(field.getName()); } return fieldNames; }
3.68
framework_Range_intersects
/** * Checks whether this range and another range are at least partially * covering the same values. * * @param other * the other range to check against * @return <code>true</code> if this and <code>other</code> intersect */ public boolean intersects(final Range other) { return getStart() < other.getEnd() && other.getStart() < getEnd(); }
3.68
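The intersection test above treats ranges as end-exclusive; a minimal standalone sketch (RangeDemo, hypothetical) makes the boundary behavior visible:

public final class RangeDemo {

    // Half-open [start, end) intersection test, as in the snippet above.
    static boolean intersects(int aStart, int aEnd, int bStart, int bEnd) {
        return aStart < bEnd && bStart < aEnd;
    }

    public static void main(String[] args) {
        System.out.println(intersects(0, 5, 4, 10)); // true  (they share 4)
        System.out.println(intersects(0, 5, 5, 10)); // false (ranges are end-exclusive)
    }
}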
pulsar_ResourceUnitRanking_estimateMaxCapacity
/** * Estimate the maximum number of namespace bundles this ResourceUnit is able to handle with all resources. */ public long estimateMaxCapacity(ResourceQuota defaultQuota) { return calculateBrokerMaxCapacity(this.systemResourceUsage, defaultQuota); }
3.68
streampipes_TextDocument_getTitle
/** * Returns the "main" title for this document, or <code>null</code> if no such title has ben set. * * @return The "main" title. */ public String getTitle() { return title; }
3.68
hadoop_User_getLastLogin
/** * Get the time of the last login. * @return the number of milliseconds since the beginning of time. */ public long getLastLogin() { return lastLogin; }
3.68
flink_OperationManager_closeOperation
/** * Close the operation and release all resources used by the {@link Operation}. * * @param operationHandle identifies the {@link Operation}. */ public void closeOperation(OperationHandle operationHandle) { writeLock( () -> { Operation opToRemove = submittedOperations.remove(operationHandle); if (opToRemove != null) { opToRemove.close(); } }); }
3.68
hadoop_LongBitFormat_retrieve
/** Retrieve the value from the record. */ public long retrieve(long record) { return (record & MASK) >>> OFFSET; }
3.68
flink_ExtractionUtils_collectStructuredMethods
/** Collects all methods that qualify as methods of a {@link StructuredType}. */ static List<Method> collectStructuredMethods(Class<?> clazz) { final List<Method> methods = new ArrayList<>(); while (clazz != Object.class) { final Method[] declaredMethods = clazz.getDeclaredMethods(); Stream.of(declaredMethods) .filter( field -> { final int m = field.getModifiers(); return Modifier.isPublic(m) && !Modifier.isNative(m) && !Modifier.isAbstract(m); }) .forEach(methods::add); clazz = clazz.getSuperclass(); } return methods; }
3.68
dubbo_ConfigurationUtils_getEnvConfiguration
/** * For compact single instance * * @deprecated Replaced to {@link ConfigurationUtils#getEnvConfiguration(ScopeModel)} */ @Deprecated public static Configuration getEnvConfiguration() { return ApplicationModel.defaultModel().modelEnvironment().getEnvironmentConfiguration(); }
3.68
flink_FlinkExtendedParser_parseSet
/** * Convert the statement to {@link SetOperation} with Flink's parse rule. * * @return the {@link SetOperation}, empty if the statement is not set command. */ public static Optional<Operation> parseSet(String statement) { if (SetOperationParseStrategy.INSTANCE.match(statement)) { return Optional.of(SetOperationParseStrategy.INSTANCE.convert(statement)); } return Optional.empty(); }
3.68
rocketmq-connect_ColumnDefinition_displaySize
/** * Indicates the column's normal maximum width in characters. * * @return the normal maximum number of characters allowed as the width of the designated column */ public int displaySize() { return displaySize; }
3.68
hibernate-validator_Filters_excludeInterfaces
/** * Returns a filter which excludes interfaces. * * @return a filter which excludes interfaces */ public static Filter excludeInterfaces() { return INTERFACES_FILTER; }
3.68
zxing_DataBlock_getDataBlocks
/** * <p>When QR Codes use multiple data blocks, they are actually interleaved. * That is, the first byte of data block 1 to n is written, then the second bytes, and so on. This * method will separate the data into original blocks.</p> * * @param rawCodewords bytes as read directly from the QR Code * @param version version of the QR Code * @param ecLevel error-correction level of the QR Code * @return DataBlocks containing original bytes, "de-interleaved" from representation in the * QR Code */ static DataBlock[] getDataBlocks(byte[] rawCodewords, Version version, ErrorCorrectionLevel ecLevel) { if (rawCodewords.length != version.getTotalCodewords()) { throw new IllegalArgumentException(); } // Figure out the number and size of data blocks used by this version and // error correction level Version.ECBlocks ecBlocks = version.getECBlocksForLevel(ecLevel); // First count the total number of data blocks int totalBlocks = 0; Version.ECB[] ecBlockArray = ecBlocks.getECBlocks(); for (Version.ECB ecBlock : ecBlockArray) { totalBlocks += ecBlock.getCount(); } // Now establish DataBlocks of the appropriate size and number of data codewords DataBlock[] result = new DataBlock[totalBlocks]; int numResultBlocks = 0; for (Version.ECB ecBlock : ecBlockArray) { for (int i = 0; i < ecBlock.getCount(); i++) { int numDataCodewords = ecBlock.getDataCodewords(); int numBlockCodewords = ecBlocks.getECCodewordsPerBlock() + numDataCodewords; result[numResultBlocks++] = new DataBlock(numDataCodewords, new byte[numBlockCodewords]); } } // All blocks have the same amount of data, except that the last n // (where n may be 0) have 1 more byte. Figure out where these start. int shorterBlocksTotalCodewords = result[0].codewords.length; int longerBlocksStartAt = result.length - 1; while (longerBlocksStartAt >= 0) { int numCodewords = result[longerBlocksStartAt].codewords.length; if (numCodewords == shorterBlocksTotalCodewords) { break; } longerBlocksStartAt--; } longerBlocksStartAt++; int shorterBlocksNumDataCodewords = shorterBlocksTotalCodewords - ecBlocks.getECCodewordsPerBlock(); // The last elements of result may be 1 element longer; // first fill out as many elements as all of them have int rawCodewordsOffset = 0; for (int i = 0; i < shorterBlocksNumDataCodewords; i++) { for (int j = 0; j < numResultBlocks; j++) { result[j].codewords[i] = rawCodewords[rawCodewordsOffset++]; } } // Fill out the last data block in the longer ones for (int j = longerBlocksStartAt; j < numResultBlocks; j++) { result[j].codewords[shorterBlocksNumDataCodewords] = rawCodewords[rawCodewordsOffset++]; } // Now add in error correction blocks int max = result[0].codewords.length; for (int i = shorterBlocksNumDataCodewords; i < max; i++) { for (int j = 0; j < numResultBlocks; j++) { int iOffset = j < longerBlocksStartAt ? i : i + 1; result[j].codewords[iOffset] = rawCodewords[rawCodewordsOffset++]; } } return result; }
3.68
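A toy illustration of the de-interleaving idea described above (DeinterleaveDemo is hypothetical and ignores the real format's unequal block sizes and error-correction bytes):

import java.util.Arrays;

public final class DeinterleaveDemo {

    // Two equal-size blocks whose bytes were written column-wise:
    // b1[0], b2[0], b1[1], b2[1], ... - separate them back into rows.
    public static void main(String[] args) {
        byte[] raw = {1, 4, 2, 5, 3, 6};
        int blocks = 2;
        int len = raw.length / blocks;
        byte[][] result = new byte[blocks][len];
        int offset = 0;
        for (int i = 0; i < len; i++) {
            for (int j = 0; j < blocks; j++) {
                result[j][i] = raw[offset++];
            }
        }
        System.out.println(Arrays.toString(result[0])); // [1, 2, 3]
        System.out.println(Arrays.toString(result[1])); // [4, 5, 6]
    }
}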
framework_GridConnector_purgeRemovedColumns
/** * Removes from the grid any orphan columns that have been removed from the * state. */ private void purgeRemovedColumns() { // Get columns still registered in the state Set<String> columnsInState = new HashSet<String>(); for (GridColumnState columnState : getState().columns) { columnsInState.add(columnState.id); } // Remove columns no longer in the state Iterator<String> columnIdIterator = columnIdToColumn.keySet() .iterator(); while (columnIdIterator.hasNext()) { String id = columnIdIterator.next(); if (!columnsInState.contains(id)) { CustomGridColumn column = columnIdToColumn.get(id); columnIdIterator.remove(); getWidget().removeColumn(column); columnOrder.remove(id); } } }
3.68
hbase_SnapshotInfo_getMobStoreFilePercentage
/** Returns the percentage of the mob store files */ public float getMobStoreFilePercentage() { return ((float) hfilesMobSize.get() / (getStoreFilesSize())) * 100; }
3.68
flink_AliasOperationUtils_createAliasList
/** * Creates a list of valid alias expressions. Resulting expression might still contain {@link * UnresolvedReferenceExpression}. * * @param aliases aliases to validate * @param child relational operation on top of which to apply the aliases * @return validated list of aliases */ static List<Expression> createAliasList(List<Expression> aliases, QueryOperation child) { ResolvedSchema childSchema = child.getResolvedSchema(); if (aliases.size() > childSchema.getColumnCount()) { throw new ValidationException("Aliasing more fields than we actually have."); } List<ValueLiteralExpression> fieldAliases = aliases.stream() .map(f -> f.accept(aliasLiteralValidator)) .collect(Collectors.toList()); List<String> childNames = childSchema.getColumnNames(); return IntStream.range(0, childNames.size()) .mapToObj( idx -> { UnresolvedReferenceExpression oldField = unresolvedRef(childNames.get(idx)); if (idx < fieldAliases.size()) { ValueLiteralExpression alias = fieldAliases.get(idx); return unresolvedCall( BuiltInFunctionDefinitions.AS, oldField, alias); } else { return oldField; } }) .collect(Collectors.toList()); }
3.68
querydsl_MultiSurfaceExpression_area
/** * The area of this MultiSurface, as measured in the spatial reference system of this MultiSurface. * * @return area */ public NumberExpression<Double> area() { if (area == null) { area = Expressions.numberOperation(Double.class, SpatialOps.AREA, mixin); } return area; }
3.68
pulsar_AdminProxyHandler_copyRequest
/** * Ensure the Authorization header is carried over after a 307 redirect * from brokers. */ @Override protected Request copyRequest(HttpRequest oldRequest, URI newURI) { String authorization = oldRequest.getHeaders().get(HttpHeader.AUTHORIZATION); Request newRequest = super.copyRequest(oldRequest, newURI); if (authorization != null) { newRequest.header(HttpHeader.AUTHORIZATION, authorization); } return newRequest; }
3.68
hbase_FilterList_filterRowCells
/** * Filters that never filter by modifying the returned List of Cells can inherit this * implementation that does nothing. {@inheritDoc} */ @Override public void filterRowCells(List<Cell> cells) throws IOException { filterListBase.filterRowCells(cells); }
3.68
flink_SplitsChange_splits
/** @return the list of splits. */ public List<SplitT> splits() { return Collections.unmodifiableList(splits); }
3.68
morf_GraphBasedUpgradeBuilder_analyzeDependency
/** * Checks dependencies of current node and previously processed node to * establish if an edge should be added. * * @param processed previously processed node * @param node current node * @param remainingReads read-level dependencies which haven't been reflected in the graph so far * @param remainingModifies modify-level dependencies which haven't been reflected in the graph so far * @param removeAtNextModify list of dependencies which will be suppressed during the next write-based edge creation attempt */ private void analyzeDependency(GraphBasedUpgradeNode processed, GraphBasedUpgradeNode node, Set<String> remainingReads, Set<String> remainingModifies, Set<String> removeAtNextModify) { // processed writes intersection with writes of the current node SetView<String> view1 = Sets.intersection(processed.getModifies(), remainingModifies); view1.stream().forEach(hit -> { if (removeAtNextModify.contains(hit)) { LOG.debug("Node: " + node.getName() + " does NOT depend on " + processed.getName() + " because of writes-writes (current-processed) intersection has been suppressed at: " + hit + "."); removeAtNextModify.remove(hit); } else { addEdge(processed, node); LOG.debug("Node: " + node.getName() + " depends on " + processed.getName() + " because of writes-writes (current-processed) intersection at: " + hit + "."); } remainingModifies.remove(hit); }); // processed writes intersection with reads of the current node SetView<String> view2 = Sets.intersection(processed.getModifies(), remainingReads); view2.stream().forEach(hit -> { addEdge(processed, node); remainingReads.remove(hit); LOG.debug("Node: " + node.getName() + " depends on " + processed.getName() + " because of reads-writes (current-processed) intersection at: " + hit + "."); }); // processed reads intersection with writes of the current node SetView<String> view3 = Sets.intersection(processed.getReads(), remainingModifies); view3.stream().forEach(hit -> { addEdge(processed, node); removeAtNextModify.add(hit); LOG.debug("Node: " + node.getName() + " depends on " + processed.getName() + " because of writes-reads (current-processed) intersection at: " + hit + ". Adding this table to removeAtNextModify."); }); if (!node.getParents().contains(processed)) { LOG.debug("No edges have been created between potential parent: " + processed.getName() + " and node: " + node.getName()); } }
3.68
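The dependency analysis above hinges on Guava's Sets.intersection views; a minimal sketch of that intersection check (IntersectionDemo is hypothetical and assumes Guava on the classpath):

import com.google.common.collect.Sets;
import java.util.Set;

public final class IntersectionDemo {

    public static void main(String[] args) {
        Set<String> writesOfProcessed = Set.of("TableA", "TableB");
        Set<String> readsOfCurrent = Set.of("TableB", "TableC");
        // A non-empty view means the current node must run after the processed one.
        System.out.println(Sets.intersection(writesOfProcessed, readsOfCurrent)); // [TableB]
    }
}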
hadoop_AbstractRouterPolicy_getReservationHomeSubcluster
/** * This method provides a wrapper of all policy functionalities for routing a * reservation. Internally it manages configuration changes, and policy * init/reinit. * * @param request the reservation to route. * * @return the id of the subcluster that will be the "home" for this * reservation. * * @throws YarnException if there are issues initializing policies, or no * valid sub-cluster id could be found for this reservation. */ @Override public SubClusterId getReservationHomeSubcluster(ReservationSubmissionRequest request) throws YarnException { if (request == null) { throw new FederationPolicyException("The ReservationSubmissionRequest cannot be null."); } if (request.getQueue() == null) { request.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME); } // apply filtering based on reservation location and active sub-clusters Map<SubClusterId, SubClusterInfo> filteredSubClusters = getActiveSubclusters(); // pick the chosen subCluster from the active ones return chooseSubCluster(request.getQueue(), filteredSubClusters); }
3.68
flink_ExecutableOperationUtils_createDynamicTableSink
/** * Creates a {@link DynamicTableSink} from a {@link CatalogTable}. * * <p>It first tries to obtain the sink factory from the given catalog, then from the supplied * sink factory supplier. Otherwise, an attempt is made to discover a matching factory using * Java SPI (see {@link Factory} for details). */ public static DynamicTableSink createDynamicTableSink( @Nullable Catalog catalog, Supplier<Optional<DynamicTableSinkFactory>> sinkFactorySupplier, ObjectIdentifier objectIdentifier, ResolvedCatalogTable catalogTable, Map<String, String> enrichmentOptions, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) { DynamicTableSinkFactory dynamicTableSinkFactory = null; if (catalog != null && catalog.getFactory().isPresent() && catalog.getFactory().get() instanceof DynamicTableSinkFactory) { // try get from catalog dynamicTableSinkFactory = (DynamicTableSinkFactory) catalog.getFactory().get(); } if (dynamicTableSinkFactory == null) { dynamicTableSinkFactory = sinkFactorySupplier.get().orElse(null); } return FactoryUtil.createDynamicTableSink( dynamicTableSinkFactory, objectIdentifier, catalogTable, enrichmentOptions, configuration, classLoader, isTemporary); }
3.68
hbase_ServerListener_waiting
/** * Started waiting on RegionServers to check-in. */ default void waiting() { }
3.68
hadoop_RouterAuditLogger_add
/** * Appends the key-val pair to the passed builder in the following format * <pair-delim>key=value. */ static void add(Keys key, String value, StringBuilder b) { b.append(AuditConstants.PAIR_SEPARATOR).append(key.name()) .append(AuditConstants.KEY_VAL_SEPARATOR).append(value); }
3.68
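A minimal sketch of the pair-appending format above (AuditPairDemo is hypothetical; the tab and '=' delimiters are assumptions standing in for Hadoop's AuditConstants):

public final class AuditPairDemo {

    // Appends one key=value pair, preceded by a pair delimiter (a tab here).
    static void add(String key, String value, StringBuilder b) {
        b.append('\t').append(key).append('=').append(value);
    }

    public static void main(String[] args) {
        StringBuilder b = new StringBuilder("USER=alice");
        add("OPERATION", "getNewApplication", b);
        add("RESULT", "SUCCESS", b);
        System.out.println(b); // prints the three pairs, tab-separated
    }
}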
flink_FileChannelOutputView_getBlockCount
/** * Gets the number of blocks written by this output view. * * @return The number of blocks written by this output view. */ public int getBlockCount() { return numBlocksWritten; }
3.68
framework_HasFilterableDataProvider_setDataProvider
/** * Sets the data provider for this listing. The data provider is queried for * displayed items as needed. * * @param dataProvider * the data provider, not <code>null</code> */ public default void setDataProvider(DataProvider<T, F> dataProvider) { setDataProvider(dataProvider, SerializableFunction.identity()); }
3.68
framework_Color_setBlue
/** * Sets the blue value of the color. Value must be within the range [0, * 255]. * * @param blue * new blue value */ public void setBlue(int blue) { if (withinRange(blue)) { this.blue = blue; } else { throw new IllegalArgumentException(OUTOFRANGE + blue); } }
3.68
hbase_StorageClusterStatusModel_getDeadNode
/** * @param index the index * @return the dead region server's name */ public String getDeadNode(int index) { return deadNodes.get(index); }
3.68
hbase_LoadBalancerFactory_getLoadBalancer
/** * Create a loadbalancer from the given conf. * @return A {@link LoadBalancer} */ public static LoadBalancer getLoadBalancer(Configuration conf) { // Create the balancer Class<? extends LoadBalancer> balancerKlass = conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(), LoadBalancer.class); return ReflectionUtils.newInstance(balancerKlass); }
3.68
hbase_RegionSplitter_getRegionServerCount
/** * Alternative getCurrentNrHRS which is no longer available. * @return Rough count of regionservers out on cluster. * @throws IOException if a remote or network exception occurs */ private static int getRegionServerCount(final Connection connection) throws IOException { try (Admin admin = connection.getAdmin()) { Collection<ServerName> servers = admin.getRegionServers(); return servers == null || servers.isEmpty() ? 0 : servers.size(); } }
3.68
morf_AbstractSqlDialectTest_testMathsDivide
/** * Test that dividing numbers returns as expected. */ @Test public void testMathsDivide() { String result = testDialect.getSqlFrom(new MathsField(new FieldLiteral(1), MathsOperator.DIVIDE, new FieldLiteral(1))); assertEquals(expectedMathsDivide(), result); }
3.68
framework_VFilterSelect_handleMouseDownEvent
/** * Handles special behavior of the mouse down event. * * @param event the mouse down event */ private void handleMouseDownEvent(Event event) { /* * Prevent the keyboard focus from leaving the textfield by preventing * the default behavior of the browser. Fixes #4285. */ if (event.getTypeInt() == Event.ONMOUSEDOWN) { event.preventDefault(); event.stopPropagation(); /* * In IE the above won't work, the blur event will still trigger. So, * we set a flag here to prevent the next blur event from happening. * This is not needed if we do not already have focus; in that case * there will not be any blur event and we should not cancel the * next blur. */ if (BrowserInfo.get().isIE() && focused) { preventNextBlurEventInIE = true; debug("VFS: Going to prevent next blur event on IE"); } } }
3.68
hbase_MasterRegionFlusherAndCompactor_setupConf
// inject our flush related configurations static void setupConf(Configuration conf, long flushSize, long flushPerChanges, long flushIntervalMs) { conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSize); conf.setLong(HRegion.MEMSTORE_FLUSH_PER_CHANGES, flushPerChanges); conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, flushIntervalMs); LOG.info("Injected flushSize={}, flushPerChanges={}, flushIntervalMs={}", flushSize, flushPerChanges, flushIntervalMs); }
3.68
hbase_Response_getCode
/** Returns the HTTP response code */ public int getCode() { return code; }
3.68
framework_AbstractSingleComponentContainerConnector_getContentWidget
/** * Returns the widget (if any) of the content of the container. * * @return widget of the only/first connector of the container, null if no * content or if there is no widget for the connector */ protected Widget getContentWidget() { ComponentConnector content = getContent(); if (null != content) { return content.getWidget(); } else { return null; } }
3.68
hadoop_RenameOperation_copyEmptyDirectoryMarkers
/** * Process all directory markers at the end of the rename. * All leaf markers are queued to be copied in the store. * <p> * Why not simply create new markers? All the metadata * gets copied too, so if there was anything relevant then * it would be preserved. * <p> * At the same time: markers aren't valued much and may * be deleted without any safety checks - so if there was relevant * data it is at risk of destruction at any point. * If there are lots of empty directory rename operations taking place, * the decision to copy the source may need revisiting. * Be advised though: the costs of the copy notwithstanding, * it is a lot easier to have one single type of scheduled copy operation * than have copy and touch calls being scheduled. * <p> * The duration returned is the time to initiate all copy/delete operations, * including any blocking waits for active copies and paged deletes * to execute. There may still be outstanding operations * queued by this method - the duration may be an underestimate * of the time this operation actually takes. * * @param srcKey source key with trailing / * @param dstKey dest key with trailing / * @param dirMarkerTracker tracker of markers * @return how long it took. */ private OperationDuration copyEmptyDirectoryMarkers( final String srcKey, final String dstKey, final DirMarkerTracker dirMarkerTracker) throws IOException { // directory marker work. LOG.debug("Copying markers from {}", dirMarkerTracker); final StoreContext storeContext = getStoreContext(); Map<Path, DirMarkerTracker.Marker> leafMarkers = dirMarkerTracker.getLeafMarkers(); Map<Path, DirMarkerTracker.Marker> surplus = dirMarkerTracker.getSurplusMarkers(); // for all leaf markers: copy the original DurationInfo duration = new DurationInfo(LOG, false, "copying %d leaf markers with %d surplus not copied", leafMarkers.size(), surplus.size()); for (DirMarkerTracker.Marker entry: leafMarkers.values()) { String key = entry.getKey(); String newDestKey = dstKey + key.substring(srcKey.length()); Path childDestPath = storeContext.keyToPath(newDestKey); LOG.debug("copying dir marker from {} to {}", key, newDestKey); activeCopies.add( initiateCopy( entry.getStatus(), key, newDestKey, childDestPath)); queueToDelete(entry); // end of loop endOfLoopActions(); } duration.close(); return duration; }
3.68
framework_DragSourceExtensionConnector_fixDragImageTransformForMobile
/** * Fix drag image offset for touch devices when the dragged image has been * offset with css transform: translate/translate3d. * <p> * This is necessary for e.g. grid rows. * <p> * This method is a NOOP for non-touch browsers. * * @param draggedElement * the element that forms the drag image */ protected void fixDragImageTransformForMobile(Element draggedElement) { if (!BrowserInfo.get().isTouchDevice()) { return; } Style style = draggedElement.getStyle(); String transition = style.getProperty("transform"); if (transition == null || transition.isEmpty() || !transition.startsWith("translate")) { return; } style.clearProperty("transform"); AnimationScheduler.get() .requestAnimationFrame( timestamp -> draggedElement.getStyle() .setProperty("transform", transition), draggedElement); }
3.68
framework_Form_removeAllActionHandlers
/** * Removes all action handlers. */ public void removeAllActionHandlers() { if (ownActionManager != null) { ownActionManager.removeAllActionHandlers(); } }
3.68
shardingsphere-elasticjob_JobFacade_misfireIfRunning
/** * Set the task misfire flag. * * @param shardingItems sharding items on which to set the misfire flag * @return whether the misfire condition is satisfied */ public boolean misfireIfRunning(final Collection<Integer> shardingItems) { return executionService.misfireIfHasRunningItems(shardingItems); }
3.68
framework_AbstractComponent_getId
/* * (non-Javadoc) * * @see com.vaadin.ui.Component#getId() */ @Override public String getId() { return getState(false).id; }
3.68
flink_NFACompiler_setCurrentGroupPatternFirstOfLoop
/** * Marks the current group pattern as the head of the TIMES quantifier or not. * * @param isFirstOfLoop whether the current group pattern is the head of the TIMES * quantifier */ @SuppressWarnings("unchecked") private void setCurrentGroupPatternFirstOfLoop(boolean isFirstOfLoop) { if (currentPattern instanceof GroupPattern) { firstOfLoopMap.put((GroupPattern<T, ?>) currentPattern, isFirstOfLoop); } }
3.68
graphhopper_CustomModelParser_createClazz
/** * This method does the following: * <ul> * <li>0. optionally we already checked the right-hand side expressions before this method call in FindMinMax.checkLMConstraints * (only the client-side custom model statements) * </li> * <li>1. determine minimum and maximum values via parsing the right-hand side expression -> done in ValueExpressionVisitor. * We need the maximum values for a simple negative check AND for the CustomWeighting.Parameters which is for * Weighting.getMinWeight which is for A*. Note: we could make this step optional somehow for other algorithms, * but parsing would be still required in the next step for security reasons. * </li> * <li>2. parse condition value of priority and speed statements -> done in ConditionalExpressionVisitor (don't parse RHS expressions again) * </li> * <li>3. create class template as String, inject the created statements and create the Class * </li> * </ul> */ private static Class<?> createClazz(CustomModel customModel, EncodedValueLookup lookup, double globalMaxSpeed, double globalMaxPriority) { try { HashSet<String> priorityVariables = new LinkedHashSet<>(); // initial value of minimum has to be >0 so that multiple_by with a negative value leads to a negative value and not 0 MinMax minMaxPriority = new MinMax(1, globalMaxPriority); FindMinMax.findMinMax(priorityVariables, minMaxPriority, customModel.getPriority(), lookup); if (minMaxPriority.min < 0) throw new IllegalArgumentException("priority has to be >=0 but can be negative (" + minMaxPriority.min + ")"); if (minMaxPriority.max < 0) throw new IllegalArgumentException("maximum priority has to be >=0 but was " + minMaxPriority.max); List<Java.BlockStatement> priorityStatements = createGetPriorityStatements(priorityVariables, customModel, lookup); HashSet<String> speedVariables = new LinkedHashSet<>(); MinMax minMaxSpeed = new MinMax(1, globalMaxSpeed); FindMinMax.findMinMax(speedVariables, minMaxSpeed, customModel.getSpeed(), lookup); if (minMaxSpeed.min < 0) throw new IllegalArgumentException("speed has to be >=0 but can be negative (" + minMaxSpeed.min + ")"); if (minMaxSpeed.max <= 0) throw new IllegalArgumentException("maximum speed has to be >0 but was " + minMaxSpeed.max); List<Java.BlockStatement> speedStatements = createGetSpeedStatements(speedVariables, customModel, lookup); // Create different class name, which is required only for debugging. // TODO does it improve performance too? I.e. it could be that the JIT is confused if different classes // have the same name and it mixes performance stats. See https://github.com/janino-compiler/janino/issues/137 long counter = longVal.incrementAndGet(); String classTemplate = createClassTemplate(counter, priorityVariables, minMaxPriority.max, speedVariables, minMaxSpeed.max, lookup, CustomModel.getAreasAsMap(customModel.getAreas())); Java.CompilationUnit cu = (Java.CompilationUnit) new Parser(new Scanner("source", new StringReader(classTemplate))). parseAbstractCompilationUnit(); cu = injectStatements(priorityStatements, speedStatements, cu); SimpleCompiler sc = createCompiler(counter, cu); return sc.getClassLoader().loadClass("com.graphhopper.routing.weighting.custom.JaninoCustomWeightingHelperSubclass" + counter); } catch (Exception ex) { String errString = "Cannot compile expression"; throw new IllegalArgumentException(errString + ": " + ex.getMessage(), ex); } }
3.68
framework_ListSet_removeFromSet
/** * Removes "e" from the set if it no longer exists in the list. * * @param e */ private void removeFromSet(E e) { Integer dupl = duplicates.get(e); if (dupl != null) { // A duplicate was present so we only decrement the duplicate count // and continue if (dupl == 1) { // This is what always should happen. A sort sets the items one // by one, temporarily breaking the uniqueness requirement. duplicates.remove(e); } else { duplicates.put(e, dupl - 1); } } else { // The "old" value is no longer in the list. itemSet.remove(e); } }
3.68
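A standalone sketch of the decrement-or-remove bookkeeping used by removeFromSet above (DuplicateCountDemo is hypothetical):

import java.util.HashMap;
import java.util.Map;

public final class DuplicateCountDemo {

    // Decrement the duplicate count for e, dropping the entry at zero.
    static <E> void release(Map<E, Integer> duplicates, E e) {
        Integer count = duplicates.get(e);
        if (count == null) {
            return; // e was not tracked as a duplicate
        }
        if (count == 1) {
            duplicates.remove(e);
        } else {
            duplicates.put(e, count - 1);
        }
    }

    public static void main(String[] args) {
        Map<String, Integer> dup = new HashMap<>();
        dup.put("a", 2);
        release(dup, "a");
        System.out.println(dup); // {a=1}
        release(dup, "a");
        System.out.println(dup); // {}
    }
}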
morf_HumanReadableStatementProducer_changeColumn
/** @see org.alfasoftware.morf.upgrade.SchemaEditor#changeColumn(java.lang.String, org.alfasoftware.morf.metadata.Column, org.alfasoftware.morf.metadata.Column) **/ @Override public void changeColumn(String tableName, Column fromDefinition, Column toDefinition) { consumer.schemaChange(HumanReadableStatementHelper.generateChangeColumnString(tableName, fromDefinition, toDefinition)); }
3.68
hbase_StoreFileReader_passesTimerangeFilter
/** * Check if this storeFile may contain keys within the TimeRange that have not expired (i.e. not * older than oldestUnexpiredTS). * @param tr the timeRange to restrict * @param oldestUnexpiredTS the oldest timestamp that is not expired, as determined by the column * family's TTL * @return false if queried keys definitely don't exist in this StoreFile */ boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { return this.timeRange == null ? true : this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; }
3.68
flink_KeyGroupRangeOffsets_getIntersection
/** * Returns a key-group range with offsets which is the intersection of the internal key-group * range with the given key-group range. * * @param keyGroupRange Key-group range to intersect with the internal key-group range. * @return The key-group range with offsets for the intersection of the internal key-group range * with the given key-group range. */ public KeyGroupRangeOffsets getIntersection(KeyGroupRange keyGroupRange) { Preconditions.checkNotNull(keyGroupRange); KeyGroupRange intersection = this.keyGroupRange.getIntersection(keyGroupRange); long[] subOffsets = new long[intersection.getNumberOfKeyGroups()]; if (subOffsets.length > 0) { System.arraycopy( offsets, computeKeyGroupIndex(intersection.getStartKeyGroup()), subOffsets, 0, subOffsets.length); } return new KeyGroupRangeOffsets(intersection, subOffsets); }
3.68
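Underneath the offset copying above is a plain intersection of inclusive key-group intervals; a minimal standalone sketch (KeyGroupIntersectionDemo, hypothetical):

public final class KeyGroupIntersectionDemo {

    // Intersection of two inclusive [start, end] intervals; empty result as int[0].
    static int[] intersection(int aStart, int aEnd, int bStart, int bEnd) {
        int start = Math.max(aStart, bStart);
        int end = Math.min(aEnd, bEnd);
        return start > end ? new int[0] : new int[] {start, end};
    }

    public static void main(String[] args) {
        int[] r = intersection(0, 9, 5, 15);
        System.out.println(r[0] + ".." + r[1]); // 5..9
    }
}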
framework_Button_getRelativeX
/** * Returns the relative mouse position (x coordinate) when the click * took place. The position is relative to the clicked component. * * @return The mouse cursor x position relative to the clicked layout * component or -1 if no x coordinate available */ public int getRelativeX() { if (null != details) { return details.getRelativeX(); } else { return -1; } }
3.68
shardingsphere-elasticjob_HandlerMappingRegistry_addMapping
/** * Add a Handler for a path pattern. * * @param method HTTP method * @param pathPattern path pattern * @param handler handler */ public void addMapping(final HttpMethod method, final String pathPattern, final Handler handler) { UrlPatternMap<Handler> urlPatternMap = mappings.computeIfAbsent(method, httpMethod -> new RegexUrlPatternMap<>()); urlPatternMap.put(pathPattern, handler); }
3.68
flink_LegacySinkTransformation_getOperatorFactory
/** Returns the {@link StreamOperatorFactory} of this {@code LegacySinkTransformation}. */ public StreamOperatorFactory<Object> getOperatorFactory() { return operatorFactory; }
3.68
hbase_BlockCacheUtil_getSize
/** Returns size of blocks in the cache */ public long getSize() { return size; }
3.68
hadoop_StageConfig_withJobAttemptNumber
/** * Set the job attempt number. * @param value new value * @return this */ public StageConfig withJobAttemptNumber(final int value) { checkOpen(); jobAttemptNumber = value; return this; }
3.68