---
networkRows: 3
networkCols: 2
foxNetworkStages: 2

foxNodeFifos:
  peToNetwork: 8
  networkToPe: 1024

resultNodeCoord:
  x: 0
  y: 0

resultNodeFifos:
  peToNetwork: 8
  networkToPe: 1024

resultUartFifoDepth: 1024

romNodeCoord:
  x: 0
  y: 2

packetFormat:
  multicastGroupBits: 1
  multicastCoordBits: 2
  readyFlagBits: 1
  resultFlagBits: 1
  matrixTypeBits: 1
  matrixCoordBits: 8
  matrixElementBits: 32

useMatrixInitFile: True
multicastAvailable: True
useMulticast: True

multicastConfig:
  multicastGroupNodes: 2
  multicastNetworkRows: 2
  multicastNetworkCols: 1
  multicastFifoDepth: 2

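As a quick sanity check on the bit budget implied by packetFormat above, the field widths can be summed. This is a sketch under the assumption that each field appears exactly once per packet; the RTL may replicate coordinate fields per axis, so treat the result as a lower bound.

```bash
# Sum of the seven packetFormat *Bits values above, in order, assuming
# each field occurs once per packet (an assumption, not the RTL layout).
total=0
for bits in 1 2 1 1 1 8 32; do
  total=$((total + bits))
done
echo "bits per packet, one instance of each field: ${total}"   # 46
```
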
language: cpp

# The original file declared 'cache:' twice (ccache and directories), which
# is a duplicate YAML key; the two are merged into one mapping here.
cache:
  ccache: true
  directories:
    - $VERILATOR_CACHE

env:
  global:
    - VERILATOR_CACHE=$HOME/verilator_cache
    - VERILATOR_ROOT=$PWD/verilator
    - VERILATOR_NUM_JOBS=$((`nproc` + 1))
    - OBJCACHE=ccache
    - PATH=$PATH:$VERILATOR_ROOT/bin
    - VERILATOR=$VERILATOR_ROOT/bin/verilator
    - RV_ROOT=$PWD

before_install:
  - git clone https://github.com/verilator/verilator.git $VERILATOR_ROOT
  # This is the first revision with the build_verilator.sh script.
  # Once a Verilator release captures this, it would be best to check out
  # a release tag instead.
  - git -C $VERILATOR_ROOT checkout 46ab907f6afdcacbf848ea090dce66f1334de663

before_script:
  - $VERILATOR_ROOT/ci/build_verilator.sh

stages:
  - build
  - test

jobs:
  include:
    - stage: build
      name: Build Verilator
      script: echo "Done building Verilator"
    - stage: test
      name: Run Tests
      script:
        - configs/swerv.config -snapshot=mybuild
        - make -j 4 -f tools/Makefile verilator snapshot=mybuild
        - make -f tools/Makefile verilator-run snapshot=mybuild |& tee sim.log
        - grep -q "Hello World from SweRV" sim.log

# Technology Setup
# Technology used is ASAP7
vlsi.core.technology: asap7
# Specify dir with ASAP7 tarball
technology.asap7.tarball_dir: ""

vlsi.core.max_threads: 12

# General Hammer Inputs

# Hammer will auto-generate a CPF for simple power designs; see
# hammer/src/hammer-vlsi/defaults.yml for more info
vlsi.inputs.power_spec_mode: "auto"
vlsi.inputs.power_spec_type: "cpf"

# Specify clock signals
vlsi.inputs.clocks: [
  {name: "clock", period: "1ns", uncertainty: "0.1ns"}
]

# Generate Make include to aid in flow
vlsi.core.build_system: make

# Power Straps
par.power_straps_mode: generate
par.generate_power_straps_method: by_tracks
par.blockage_spacing: 2.0
par.generate_power_straps_options:
  by_tracks:
    strap_layers:
      - M3
      - M4
      - M5
      - M6
      - M7
      - M8
      - M9
    pin_layers:
      - M9
    track_width: 7 # minimum allowed for M2 & M3
    track_spacing: 0
    track_spacing_M3: 1 # to avoid M2 shorts at higher density
    track_start: 10
    power_utilization: 0.05
    power_utilization_M8: 1.0
    power_utilization_M9: 1.0

# Placement Constraints
# For ASAP7, all numbers must be 4x larger than final GDS
vlsi.inputs.placement_constraints:
  - path: "Sha3AccelwBB"
    type: toplevel
    x: 0
    y: 0
    width: 300
    height: 300
    margins:
      left: 0
      right: 0
      top: 0
      bottom: 0
  - path: "Sha3AccelwBB/dco"
    type: hardmacro
    x: 108
    y: 108
    width: 128
    height: 128
    orientation: r0
    top_layer: M9
  - path: "Sha3AccelwBB/place_obs_bottom"
    type: obstruction
    obs_types: ["place"]
    x: 0
    y: 0
    width: 300
    height: 1.08 # 1 core site tall, necessary to avoid shorts

# Pin placement constraints
vlsi.inputs.pin_mode: generated
vlsi.inputs.pin.generate_mode: semi_auto
vlsi.inputs.pin.assignments: [
  {pins: "*", layers: ["M5", "M7"], side: "bottom"}
]

# Paths to extra libraries
vlsi.technology.extra_libraries_meta: ["append", "deepsubst"]
vlsi.technology.extra_libraries:
  - library:
      nldm liberty file_deepsubst_meta: "local"
      nldm liberty file: "extra_libraries/example/ExampleDCO_PVT_0P63V_100C.lib"
      lef file_deepsubst_meta: "local"
      lef file: "extra_libraries/example/ExampleDCO.lef"
      gds file_deepsubst_meta: "local"
      gds file: "extra_libraries/example/ExampleDCO.gds"
      corner:
        nmos: "slow"
        pmos: "slow"
        temperature: "100 C"
      supplies:
        VDD: "0.63 V"
        GND: "0 V"
  - library:
      nldm liberty file_deepsubst_meta: "local"
      nldm liberty file: "extra_libraries/example/ExampleDCO_PVT_0P77V_0C.lib"
      lef file_deepsubst_meta: "local"
      lef file: "extra_libraries/example/ExampleDCO.lef"
      gds file_deepsubst_meta: "local"
      gds file: "extra_libraries/example/ExampleDCO.gds"
      corner:
        nmos: "fast"
        pmos: "fast"
        temperature: "0 C"
      supplies:
        VDD: "0.77 V"
        GND: "0 V"

# Because the DCO is a dummy layout, we treat it as a physical-only cell
par.inputs.physical_only_cells_mode: append
par.inputs.physical_only_cells_list:
  - ExampleDCO

# SRAM Compiler compiler options
vlsi.core.sram_generator_tool: "sram_compiler"
# You should specify a location for the SRAM generator in the tech plugin
vlsi.core.sram_generator_tool_path: []
vlsi.core.sram_generator_tool_path_meta: "append"

# Tool options. Replace with your tool plugin of choice.

# Genus options
vlsi.core.synthesis_tool: "genus"
vlsi.core.synthesis_tool_path: ["hammer-cadence-plugins/synthesis"]
vlsi.core.synthesis_tool_path_meta: "append"
synthesis.genus.version: "1813"

# Innovus options
vlsi.core.par_tool: "innovus"
vlsi.core.par_tool_path: ["hammer-cadence-plugins/par"]
vlsi.core.par_tool_path_meta: "append"
par.innovus.version: "181"
par.innovus.design_flow_effort: "standard"
par.inputs.gds_merge: true

# Calibre options
vlsi.core.drc_tool: "calibre"
vlsi.core.drc_tool_path: ["hammer-mentor-plugins/drc"]
vlsi.core.lvs_tool: "calibre"
vlsi.core.lvs_tool_path: ["hammer-mentor-plugins/lvs"]

sudo: required
dist: xenial
language: python
git:
  depth: 1
  submodules: true
cache:
  - apt: true
  - pip: true
matrix:
  include:
    - name: "Python 2.7 gcc-8"
      python: 2.7
      env: CC=gcc-8 CXX=g++-8
      addons:
        apt:
          sources:
            - ubuntu-toolchain-r-test
          packages:
            - g++-8
            - gcc-8
    - name: "Python 3.5 gcc-8"
      python: 3.5
      env: CC=gcc-8 CXX=g++-8
      addons:
        apt:
          sources:
            - ubuntu-toolchain-r-test
          packages:
            - g++-8
            - gcc-8
    - name: "Python 3.7 gcc-8 Debug+Coverage"
      python: 3.7
      env: CC=gcc-8 CXX=g++-8 BUILD_ARGS="--build-type Debug -- -DCODE_COVERAGE=ON" CODE_COVERAGE=1
      addons:
        apt:
          sources:
            - ubuntu-toolchain-r-test
          packages:
            - g++-8
            - gcc-8
    - name: "Python 3.7 gcc-7 (deploy on tag)"
      python: 3.7
      env: CC=gcc-7 CXX=g++-7 DO_DEPLOY=1
      addons:
        apt:
          sources:
            - ubuntu-toolchain-r-test
          packages:
            - g++-7
            - gcc-7

addons:
  apt:
    sources:
      - ubuntu-toolchain-r-test
      #- llvm-toolchain-precise-3.9
    packages:
      - build-essential
      - cython

# command to install dependencies
install:
  - sudo cp .travis/disco.list /etc/apt/sources.list.d/disco.list
  - sudo apt update
  - if [ "$CODE_COVERAGE" == "1" ]; then
      sudo update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-8 10;
      sudo apt install -y lcov;
      gem install coveralls-lcov;
    fi
  - sudo apt install libantlr4-runtime-dev antlr4 -y
  - pip install -r requirements.txt
  - python setup.py build $BUILD_ARGS
  - python setup.py install $BUILD_ARGS

script:
  # scikit-build automatically executes setup.py develop, which requires
  # access to the installation dir
  - python setup.py test $BUILD_ARGS

after_success:
  - lcov --compat-libtool --directory . --capture --output-file coverage.info.raw
  - lcov --remove coverage.info.raw -o coverage.info '/usr/*' './_skbuild/*'
  - coveralls-lcov coverage.info

deploy:
  provider: pypi
  user: nic30
  password:
    secure: <KEY>
  on:
    tags: true
    branch: master
    condition: $DO_DEPLOY = 1

---
# Copyright 2021 Datum Technology Corporation
# SPDX-License-Identifier: Apache-2.0 WITH SHL-2.1
########################################################################################################################
# Licensed under the Solderpad Hardware License v 2.1 (the "License"); you may not use this file except in compliance
# with the License, or, at your option, the Apache License version 2.0. You may obtain a copy of the License at
# https://solderpad.org/licenses/SHL-2.1/
# Unless required by applicable law or agreed to in writing, any work distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
########################################################################################################################

## README
# This is an EDApp Metadata file that describes ???

core:
  uid: ef15c1cc-2d1d-4fe6-b3c1-c9872ed1f221
  name: "eth_mac_10g"
  type: "rtl-ip"
  aliases:
  full-name: "Ethernet 10G MAC"
  version: "0.0.1"
  release-date: 2021-03-28
  description: >
    RTL implementation of 802.3 MAC for 10G.
  keywords: [ "eth", "ethernet", "802.3" ]
  catalog: "https://moore.io/ip/catalog"
  suite:
  copyright-holders: ["Datum Technology Corporation"]
  logo: "https://moore.io/assets/images/eth_mac_10g.svg"
  license: "SHL-2.1"
  private: False

rtl-ip:
  type:
  sub-type:
  sub-sub-type:
  block-diagram:
  languages:
    - { name: "system-verilog", version: "^" }
  dependencies:
    - { name: "mio@fifos_rtl", version: "^", notes: "" }
    - { name: "mio@counters_rtl", version: "^", notes: "" }
    - { name: "<EMAIL>@mem_if_rtl", version: "^", notes: "" }
  simulators-supported:
    - { name: "xilinx", version: "2020.2", level: 100, notes: "" }
    - { name: "xilinx", version: "2019.2", level: 75, notes: "" }

community:
  contributors:
    - { name: "<NAME>", org: "Datum Technology Corporation", email: "<EMAIL>", linked-in: "https://www.linkedin.com/in/david-poulin-24674734/" }
  home-page: "https://github.com/Datum-Technology-Corporation/mio_eth/projects/1"
  repository:
    type: "git"
    url: "https://github.com/Datum-Technology-Corporation/mio_eth"
  bugs:
    url: "https://github.com/Datum-Technology-Corporation/mio_et/issues"
    email: "<EMAIL>"
  funding:
    type: ""
    url: ""

structure:
  scripts-paths: [ "/bin" ]
  docs-paths: [ "/docs" ]
  examples-paths: [ "/examples" ]
  src-paths: [ "/src" ]
  hdl-src:
    files: ["/*/*.sv", "/*/*.svh", "/*/*.sv"]
    top-files: [ "/uvma_axil_pkg.sv" ]
    top-modules:
    tests-paths: [ "/ut" ]
    compilation-arguments: ["+define+FIFOS_RTL_MAX_DEPTH=1_024"]

events:
  pre-git-pull:
  post-git-pull:
    - '$refactor.disconnect("*/*")'
    - '$refactor.connect("*/*")'
  pre-init:
  post-init:
  pre-library:
  post-library:
  pre-compilation:
  post-compilation:
  pre-elaboration:
  post-elaboration:
  pre-simulation:
  post-simulation:
  pre-results-processing:
  post-results-processing:
  pre-results-upload:
  post-results-upload:
  pre-shutdown:
  pre-git-commit: [ 'pre-commit.py' ]
  post-git-commit:

parameters:
  git-pull:
  init:
  library:
  compilation:
    - { name: "enable-10g", type: "boolean", cli-str: "+define+ETH_MAC_10G25G_RTL_ENABLE_10G", default: True }
    - { name: "enable-25g", type: "boolean", cli-str: "+define+ETH_MAC_10G25G_RTL_ENABLE_25G", default: True }
  elaboration:
  simulation:
  results-processing:
  results-upload:
  shutdown:

configuration: # data store for the IP

template-metadata:
  name: ""
  url: ""
  input-parameters: null

# .gitlab-ci.yml
stages:
  - uhd
  - gnuradio
  - fpga

.conditionals: &conditionals
  refs:
    - master
    - branches
    - web

.uhd-build: &uhd-build
  - mkdir build
  - cd build
  - cmake ../ -DENABLE_UHD=1 -DENABLE_GNURADIO=0
  - make -j10

.gnuradio-build: &gnuradio-build
  - mkdir build
  - cd build
  - cmake ../ -DENABLE_UHD=1 -DENABLE_GNURADIO=1
  - make -j10

.fpga-script: &fpga-script
  - rm -rf ../uhd-fpga && git clone -b UHD-3.13 https://github.com/EttusResearch/fpga.git ../uhd-fpga
  - export UHD_FPGA_DIR=`pwd`/../uhd-fpga
  - source /opt/Xilinx/Vivado/2017.4/settings64.sh
  - cd fpga-rfnoc/testbenches
  - for d in ./*/ ; do (cd "$d" && pwd && make clean && ./runtestbench.sh); done

build-uhd-3.13:
  stage: uhd
  image: theseuscores/uhd:UHD-3.13-rfnoc-all
  script: *uhd-build
  only: *conditionals

build-uhd-3.14:
  stage: uhd
  image: theseuscores/uhd:UHD-3.14-rfnoc-all
  script: *uhd-build
  only: *conditionals

build-gnuradio-maint-uhd-3.13:
  stage: gnuradio
  image: theseuscores/gnuradio:maint-3.7-UHD-3.13-rfnoc
  script: *gnuradio-build
  only: *conditionals

build-gnuradio-maint-uhd-3.14:
  stage: gnuradio
  image: theseuscores/gnuradio:maint-3.7-UHD-3.14-rfnoc
  script: *gnuradio-build
  only: *conditionals

# deps/spdlog-1.5.0/appveyor.yml (from repo e7p/hal)
version: 1.0.{build}
image: Visual Studio 2017
environment:
  matrix:
    - GENERATOR: '"Visual Studio 14 2015"'
      BUILD_TYPE: Debug
      WCHAR: 'OFF'
    - GENERATOR: '"Visual Studio 14 2015"'
      BUILD_TYPE: Release
      WCHAR: 'ON'
    - GENERATOR: '"Visual Studio 14 2015 Win64"'
      BUILD_TYPE: Debug
      WCHAR: 'ON'
    - GENERATOR: '"Visual Studio 14 2015 Win64"'
      BUILD_TYPE: Release
      WCHAR: 'ON'
    - GENERATOR: '"Visual Studio 15 2017 Win64"'
      BUILD_TYPE: Debug
      WCHAR: 'ON'
    - GENERATOR: '"Visual Studio 15 2017 Win64"'
      BUILD_TYPE: Release
      WCHAR: 'OFF'
build_script:
  - cmd: >-
      set

      mkdir build

      cd build

      set PATH=%PATH%;C:\Program Files\Git\usr\bin

      cmake .. -G %GENERATOR% -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -DSPDLOG_WCHAR_SUPPORT=%WCHAR% -DSPDLOG_BUILD_EXAMPLE=ON -DSPDLOG_BUILD_EXAMPLE_HO=ON -DSPDLOG_BUILD_TESTS=ON -DSPDLOG_BUILD_TESTS_HO=OFF

      cmake --build . --config %BUILD_TYPE%
test_script:
  - ctest -VV -C "%BUILD_TYPE%"

name: CI

# Controls when the workflow will run
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  test-nutshell:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Prepare environment
        run: |
          cd $GITHUB_WORKSPACE/..
          git config --global url."https://github.com/".insteadOf <EMAIL>@github.com:
          git config --global url."https://".insteadOf git://
          git clone https://github.com/OpenXiangShan/xs-env
          cd $GITHUB_WORKSPACE/../xs-env
          sudo -s ./setup-tools.sh
          source ./setup.sh
          rm -r $GITHUB_WORKSPACE/../xs-env/NutShell
          cp -r $GITHUB_WORKSPACE $GITHUB_WORKSPACE/../xs-env
          cd $GITHUB_WORKSPACE/../xs-env
          source ./env.sh
          cd $GITHUB_WORKSPACE/../xs-env/NutShell
          source ./env.sh
          make init

      - name: Microbench - Nutshell
        run: |
          cd $GITHUB_WORKSPACE/../xs-env
          source ./env.sh
          cd $GITHUB_WORKSPACE/../xs-env/NutShell
          source ./env.sh
          make clean
          make emu EMU_CXX_EXTRA_FLAGS="-DFIRST_INST_ADDRESS=0x80000000"
          ./build/emu -b 0 -e 0 -i ./ready-to-run/microbench.bin

      # - name: Microbench - Argo
      #   run: |
      #     cd $GITHUB_WORKSPACE/../xs-env
      #     source ./env.sh
      #     cd $GITHUB_WORKSPACE/../xs-env/NutShell
      #     source ./env.sh
      #     make clean
      #     make emu CORE=ooo EMU_CXX_EXTRA_FLAGS="-DFIRST_INST_ADDRESS=0x80000000"
      #     ./build/emu -b 0 -e 0 -i ./ready-to-run/microbench.bin

# Adapted from Garnet and ButterPHY

name: phase_interpolator

commands:
  - |
    mkdir -p outputs
    tar -xvf /home/sjkim85/dragonphy_tarballs/phase_interpolator-latest.tar.gz -C outputs
    mv outputs/phase_interpolator-0.2.0/* outputs/

outputs:
  - phase_interpolator.lef
  - phase_interpolator.gds
  - phase_interpolator.spi
  - phase_interpolator.version

# hw/vendor/tinyfpga_tinyfpga-bootloader/.travis.yml (from repo geegeea/opentitan)
language: python

jobs:
  include:
    - stage: test
      python: 2.7
      env: TOXENV=py27
      name: "Python 2.7"
    - python: 3.4
      env: TOXENV=py34
      name: "Python 3.4"
    - python: 3.5
      env: TOXENV=py35
      name: "Python 3.5"
    - python: 3.6
      env: TOXENV=py36
      name: "Python 3.6"
    - stage: deploy
      python: 3.6
      name: "Upload dev version to PyPi"
      script: ./.push-to-pypi.sh

stages:
  - test
  - name: deploy
    if: branch = master

install:
  - git fetch --tags
  - cd programmer
  - pip install --upgrade pip
  - pip install --upgrade tox
  - pip install --upgrade setuptools
  - pip install --upgrade setuptools_scm
  - pip install --upgrade wheel
  - pip install --upgrade twine

script:
  - tox

notifications:
  email: false

git:
  depth: false

# flatsat/mosquitto/docker-compose.yml
version: "3.6"

services:
  mosquitto:
    image: eclipse-mosquitto:2-openssl
    ports:
      - 1883:1883
    networks:
      - cedalo-platform
    volumes:
      - ./mosquitto/config:/mosquitto/config
      - ./mosquitto/data:/mosquitto/data
  management-center:
    image: cedalo/management-center:2
    ports:
      - 8088:8088
    depends_on:
      - mosquitto
    networks:
      - cedalo-platform
    environment:
      CEDALO_MC_BROKER_ID: mosquitto-2.0
      CEDALO_MC_BROKER_NAME: Mosquitto 2.0
      CEDALO_MC_BROKER_URL: mqtt://mosquitto:1883
      CEDALO_MC_BROKER_USERNAME: cedalo
      CEDALO_MC_BROKER_PASSWORD: <PASSWORD>
      CEDALO_MC_USERNAME: cedalo
      CEDALO_MC_PASSWORD: <PASSWORD>

networks:
  cedalo-platform:
    name: cedalo-platform
    driver: bridge

sudo: required
language: python
os: linux

addons:
  apt:
    update: false
    packages:
      - lib32z1
      - lib32stdc++6
      - libexpat1:i386
      - libc6:i386
      - libsm6:i386
      - libncurses5:i386
      - libx11-6:i386
      - zlib1g:i386
      - libxext6:i386
      - libxft2:i386

install:
  - pip install -r requirements.txt
  - stat /home/travis/intelFPGA/19.1/modelsim_ase || (curl 'http://download.altera.com/akdlm/software/acdsinst/19.1std/670/ib_installers/ModelSimSetup-19.1.0.670-linux.run' -o ModelSimSetup.run && chmod +x ModelSimSetup.run && travis_wait 30 ./ModelSimSetup.run --mode unattended --accept_eula 1 && sed -i 's/linux_rh60/linux/g' /home/travis/intelFPGA/19.1/modelsim_ase/vco)

script:
  - export PATH=$PATH:/home/travis/intelFPGA/19.1/modelsim_ase/bin
  - cd ./sim/as4c4m16sa_tb/ && hdlmake fetch && hdlmake && make
  - cd -

cache:
  directories:
    - /home/travis/intelFPGA/

version: '2'
services:
  verilator:
    build: .
    image: aignacio/riscv_verilator_model:latest
    restart: always
    ports:
      - "8080:8080"

# .gitpod.yml
image:
  file: .gitpod.Dockerfile

tasks:
  - init: echo "Starting init" | make BUILD_TYPE=debug

github:
  prebuilds:
    # enable for the master/default branch (defaults to true)
    master: true
    # enable for all branches in this repo (defaults to false)
    branches: true
    # enable for pull requests coming from this repo (defaults to true)
    pullRequests: true
    # enable for pull requests coming from forks (defaults to false)
    pullRequestsFromForks: true
    # add a "Review in Gitpod" check to pull requests (defaults to true)
    addCheck: true
    # add a label once the prebuild is ready to pull requests (defaults to false)
    addLabel: prebuilt-in-gitpod

vscode:
  extensions:
    - [email protected]:qLtqI3aUcEBX9EpuK0ZCyw==
    - [email protected]:Pq/tmf2WN3SanVzB4xZc1g==

# From repo SHirsch78/XSharpDev
#See https://docs.github.com/en/actions/configuring-and-managing-workflows/configuring-a-workflow for help
name: ContinuousBuild

#Set when this build is activated. This can be once x time period, or on push/commit of certain branches
on:
  schedule:
    - cron: '0 2 * * *' # every night at 2 am UTC
  pull_request:
    types: [opened, assigned, synchronize, edited]
  workflow_dispatch:

env:
  DOTNET_NOLOGO: true
  DOTNET_CLI_TELEMETRY_OPTOUT: true

jobs:
  job1:
    name: BuildXSharpCompiler
    runs-on: windows-2019 #Specify the OS to execute on
    steps:
      - uses: actions/checkout@v2
        with:
          ref: main
      # - uses: actions/[email protected]
      #   with:
      #     dotnet-version: '3.1.402' # SDK Version to use; should match version in global.json and build/Targets/Tools.props
      - name: BuildCompiler
        shell: cmd
        run: |
          echo "RoslynDir=./Roslyn" >> $GITHUB_ENV
          echo "XSharpDir=./XSharp" >> $GITHUB_ENV
          call ContinuousIntegrationBuild.cmd
      - name: Upload Results
        uses: actions/upload-artifact@v2
        with:
          name: BuildResults
          path: ./XSharp/Artifacts/Zips/*.zip
      - name: Upload BuildLogs
        uses: actions/upload-artifact@v2
        with:
          name: BuildLogs
          path: ./XSharp/build*.Log
      - name: RunTests
        shell: cmd
        run: |
          call RunCompilertests.cmd
      - name: Upload TestLogs
        uses: actions/upload-artifact@v2
        with:
          name: TestLogs
          path: ./XSharp/artifacts/Tests/*.Log

# From repo parzival3/Surelog
- test: riscv_instr_cov_test
  description: >
    Parse the instruction information from the CSV trace log, sample
    functional coverage from the instruction trace.
  iterations: 1
  gen_test: riscv_instr_cov_test
  no_iss: 1
  no_gcc: 1
  no_post_compare: 1

- test: exp_riscv_instr_cov_test
  description: >
    Parse the instruction information from the CSV trace log, sample
    functional coverage from the instruction trace.
  iterations: 1
  gen_test: exp_riscv_instr_cov_test
  no_iss: 1
  no_gcc: 1
  no_post_compare: 1

name: run test cases

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  testing:
    strategy:
      matrix:
        os: [ubuntu-20.04]
        compiler: [gcc, clang]
    runs-on: ${{ matrix.os }}
    env:
      CC: ${{ matrix.compiler }}
    steps:
      - uses: actions/checkout@v2
      - name: install bats
        run: |
          curl -L -o bats-core-1.2.1.tar.gz https://github.com/bats-core/bats-core/archive/v1.2.1.tar.gz
          tar zxvf bats-core-1.2.1.tar.gz
          cd bats-core-1.2.1 && sudo ./install.sh /usr/local
      - name: install uncrustify
        run: |
          curl -LO http://launchpadlibrarian.net/516341795/uncrustify_0.72.0+dfsg1-2_amd64.deb
          sudo dpkg -i uncrustify_0.72.0+dfsg1-2_amd64.deb || true
      - name: build packcc
        run: |
          (
            cd build/$CC
            make all check
          )

version: v1.0
name: End House Software Pipeline
agent:
  machine:
    type: e1-standard-2
    os_image: ubuntu1804
blocks:
  - name: DeployProcesses
    task:
      jobs:
        - name: PreDeploy
          commands:
            - checkout
            - echo 'Running Semaphore Processes...'

hosts:
  myhost:
    processes:
      myprocess:
        components:
          mycore:
            config_file: core_config.py
            type: core
          experiment_dummy1:
            class: test.util.experiments.StorageExperiment
            config:
              dummy_verbose: false
            type: experiment
          experiment_dummy2:
            class: test.util.experiments.StorageExperiment
            config:
              dummy_verbose: false
            type: experiment
          mylab:
            config_file: lab_config.py
            type: laboratory
        config_file: process_config.py

vlsi.inputs.placement_constraints:
  - path: "ChipTop"
    type: toplevel
    x: 0
    y: 0
    width: 2920
    height: 3520
    margins:
      left: 0
      right: 0
      top: 0
      bottom: 0
  - path: "ChipTop/system/tile_prci_domain/tile_reset_domain/tile/dcache/data/data_arrays_0/data_arrays_0_ext/mem_0_0"
    type: hardmacro
    x: 1000
    y: 1000
    orientation: r0
    top_layer: "met4"
  - path: "ChipTop/system/tile_prci_domain/tile_reset_domain/tile/frontend/icache/data_arrays_0/data_arrays_0_0_ext/mem_0_0"
    type: hardmacro
    x: 2500
    y: 1800
    orientation: r0
    top_layer: "met4"
  - path: "ChipTop/system/tile_prci_domain/tile_reset_domain/tile/frontend/icache/tag_array/tag_array_ext/mem_0_0"
    type: hardmacro
    x: 1000
    y: 2600
    orientation: r0
    top_layer: "met4"

# .readthedocs.yml
version: 2

formats: all

sphinx:
  configuration: docs/conf.py

python:
  install:
    - requirements: docs/requirements.txt

# 2021/quals/kctf/challenge-templates/web/metadata.yaml
# Human readable task name
name: web 2000

# Long form description.
description: |+
  In this challenge, you have to find an XSS. Good luck!

# The flag
flag: CTF{foobar}

# Task category. (one of hardware, crypto, pwn, reversing, web, net, misc)
category: web

# === the fields below will be filled by SRE or automation ===

# Task label
label: ''

# URL for web challenges
link: ''

# host/port for non-web challenges
host: ''

# the URL for attachments, to be filled in by automation
attachment: ''

# is this challenge released? Will be set by SREs
visible: false

language: c

matrix:
  include:
    - os: linux
      compiler: gcc
      cache: apt
      dist: xenial
    - os: osx
      compiler: clang
      osx_image: xcode11.5

env:
  global:
    - SHORT_COMMIT_HASH=`git rev-parse --short HEAD`
    - VERSION_STRING=nightly-$SHORT_COMMIT_HASH
    - BUILD_NAME="HackRF-`date +%Y-%m-%d`-$SHORT_COMMIT_HASH"
    - ARTEFACT_BASE=$TRAVIS_BUILD_DIR/artefacts/
    - ARTEFACT_PATH=$ARTEFACT_BASE/$BUILD_NAME

addons:
  apt:
    packages:
      - libusb-1.0-0-dev
      - libfftw3-dev
      - dfu-util
      - gcc-arm-none-eabi
      - libnewlib-arm-none-eabi
      - libstdc++-arm-none-eabi-newlib
      - python-yaml
  homebrew:
    taps: armmbed/formulae
    packages:
      - fftw
      - arm-none-eabi-gcc
      - dfu-util

before_install:
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then pip install PyYAML; fi

before_script:
  - export CFLAGS="-Wall -Wextra -Werror"

script:
  # Host code
  - mkdir host/build
  - cd host/build
  - cmake ..
  - make
  # Firmware
  # Set version string
  - cd ../../firmware
  - sed -e "s/\#set(VERSION.*/set(VERSION \"$VERSION_STRING\")/" -i".bak" hackrf-common.cmake
  - mkdir build-hackrf-one
  - mkdir build-jawbreaker
  - mkdir build-rad1o
  - cd libopencm3
  - make
  - cd ../build-hackrf-one
  - cmake ..
  - make
  - cd ../build-jawbreaker
  - cmake -DBOARD=JAWBREAKER ..
  - make
  - cd ../build-rad1o
  - cmake -DBOARD=RAD1O ..
  - make

after_success:
  # Construct archive for deploying to ubertooth-nightlies
  - mkdir -p $ARTEFACT_PATH/firmware-bin/
  # Export HackRF git repo
  - cd $TRAVIS_BUILD_DIR/
  - git archive --format=tar HEAD | (cd $ARTEFACT_PATH && tar xf -)
  # Set version string
  - sed -e "s/\#set(RELEASE.*/set(RELEASE \"$VERSION_STRING\")/" -i".bak" $ARTEFACT_PATH/host/cmake/set_release.cmake
  # Copy firmware to firmware-bin directory
  - cd $TRAVIS_BUILD_DIR/firmware/
  - cp cpld/sgpio_if/default.xsvf $ARTEFACT_PATH/firmware-bin/hackrf_cpld_default.xsvf
  - cp build-hackrf-one/hackrf_usb/hackrf_usb.bin $ARTEFACT_PATH/firmware-bin/hackrf_one_usb.bin
  - cp build-hackrf-one/hackrf_usb/hackrf_usb.dfu $ARTEFACT_PATH/firmware-bin/hackrf_one_usb.dfu
  - cp build-jawbreaker/hackrf_usb/hackrf_usb.bin $ARTEFACT_PATH/firmware-bin/hackrf_jawbreaker_usb.bin
  - cp build-jawbreaker/hackrf_usb/hackrf_usb.dfu $ARTEFACT_PATH/firmware-bin/hackrf_jawbreaker_usb.dfu
  - cp build-rad1o/hackrf_usb/hackrf_usb.bin $ARTEFACT_PATH/firmware-bin/rad1o_usb.bin
  - cp build-rad1o/hackrf_usb/hackrf_usb.dfu $ARTEFACT_PATH/firmware-bin/rad1o_usb.dfu
  # Build the archive
  - cd $ARTEFACT_BASE
  - tar -cJvf $ARTEFACT_BASE/$BUILD_NAME.tar.xz $BUILD_NAME

deploy:
  provider: script
  skip-cleanup: true
  script: bash $TRAVIS_BUILD_DIR/tools/deploy-nightly.sh
  on:
    branch: master

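The three per-board firmware builds in the script section above repeat one mkdir/cmake/make shape. A sketch of the same sequence as a loop; the board-to-flag mapping is copied from the script, everything else is just restructuring:

```bash
# Same cmake/make sequence as the script section, as a loop.
cd firmware
for board in hackrf-one jawbreaker rad1o; do
  case "$board" in
    jawbreaker) flags="-DBOARD=JAWBREAKER" ;;
    rad1o)      flags="-DBOARD=RAD1O" ;;
    *)          flags="" ;;   # hackrf-one is the default board
  esac
  mkdir -p "build-$board"
  # $flags is intentionally unquoted so an empty value expands to nothing.
  ( cd "build-$board" && cmake $flags .. && make )
done
```
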
# From repo draperlaboratory/opentitan
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0

gen-weights:
  Branch: 0.1
  ECall: 1.0
  Jump: 0.1
  Loop: 0.1
  StraightLineInsn: 1.0

# .travis.yml
language: minimal

install: skip

services: docker

before_install: docker pull ghdl/vunit:llvm

script: docker run --rm -t -v `pwd`:/build -w /build ghdl/vunit:llvm bash -c "apt update && apt install -y python3-pexpect && make GNATMAKE='gnatmake -j'$(nproc) && if [ -n \"$TRAVIS_FULL_CHECK\" ] ; then make -j$(nproc) check; else make -j$(nproc) check_light ; fi"

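The conditional buried at the end of the docker one-liner above is easy to miss. Unpacked, it reads as follows (same targets and flags as the script):

```bash
# Run the full 'check' target only when TRAVIS_FULL_CHECK is set;
# otherwise fall back to the lighter 'check_light' target.
if [ -n "$TRAVIS_FULL_CHECK" ]; then
  make -j"$(nproc)" check
else
  make -j"$(nproc)" check_light
fi
```
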
# .github/workflows/ecosystem-compat.yml
# Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause

# Tests to ensure that projects depending on cocotb continue to work with the
# latest development version of cocotb.
#
# Generally, we test the development version of cocotb against supported,
# released versions of the other projects. (It is expected that the projects
# themselves test their in-development code against the released version of
# cocotb.)

name: Ecosystem compatibility tests

on:
  # Run daily at midnight (UTC).
  schedule:
    - cron: '0 0 * * *'
  # Allow triggering a CI run from the web UI.
  workflow_dispatch:

jobs:
  cocotb-coverage:
    name: Test cocotb-coverage 1.1
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: "3.10"
      - name: Install Icarus Verilog
        run: sudo apt install -y --no-install-recommends iverilog
      - name: Checkout cocotb repository
        uses: actions/checkout@v2
        with:
          path: cocotb
      - name: Install the development version of cocotb
        run: pip3 install ./cocotb
      - name: Checkout cocotb-coverage repository
        uses: actions/checkout@v2
        with:
          repository: mciepluc/cocotb-coverage
          path: cocotb-coverage
      - name: Install the release version of cocotb-coverage
        run: pip3 install cocotb-coverage==1.1
      - name: Run tests
        # Don't run tests through tox (as present in cocotb-coverage) to be able
        # to override the cocotb dependency.
        run: |
          pip3 install pytest
          cd cocotb-coverage
          export SIM=icarus
          make -k -C tests

# ivpm.yaml
package:
  name: tblink-rpc-hdl
  version: 0.0.1
deps:
  - name: cython
    src: pypi
  - name: tblink-rpc-core
    url: https://github.com/tblink-rpc/tblink-rpc-core.git
dev-deps:
  - name: cython
    src: pypi
  - name: tblink-rpc-core
    url: https://github.com/tblink-rpc/tblink-rpc-core.git
  - name: tblink-rpc-utils
    url: https://github.com/tblink-rpc/tblink-rpc-utils.git
  - name: pytblink-rpc
    url: https://github.com/tblink-rpc/pytblink-rpc.git
  - name: tblink-bfms-rv
    url: https://github.com/tblink-bfms/tblink-bfms-rv.git
  - name: mkdv
    url: https://github.com/fvutils/mkdv.git
    branch: schema-redo
  - name: cocotb
    src: pypi
  - name: googletest
    url: https://github.com/google/googletest/archive/release-1.10.0.tar.gz
  - name: uvm
    # Filename restored to the Accellera naming scheme; the original entry
    # read "UVM-18002-2020-11tar.gz", which is missing the dots.
    url: https://www.accellera.org/images/downloads/standards/uvm/UVM-1800.2-2020-1.1.tar.gz
    src: tgz

# models/AI-Model-Zoo/model-list/pt_centerpoint_astyx_2560_40_54G_2.0/model.yaml
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

description: CenterPoint on Astyx 4D radar data.
input size: 2560*40*4
float ops: 54G
task: 3d detection
framework: pytorch
prune: 'no'
version: 2.0
files:
  - name: pt_centerpoint_astyx_2560_40_54G_2.0
    type: float & quantized
    board: GPU
    download link: https://www.xilinx.com/bin/public/openDownload?filename=pt_centerpoint_astyx_2560_40_54G_2.0.zip
    checksum: 944e897676077e50df489662995f5f59
  - name: centerpoint_0_pt
    type: xmodel
    board: zcu102 & zcu104 & kv260
    download link: https://www.xilinx.com/bin/public/openDownload?filename=centerpoint_0_pt-zcu102_zcu104_kv260-r2.0.0.tar.gz
    checksum: 78d0d174dc2fd2be06fa7e7ddef301bb
  - name: centerpoint_1_pt
    type: xmodel
    board: zcu102 & zcu104 & kv260
    download link: https://www.xilinx.com/bin/public/openDownload?filename=centerpoint_1_pt-zcu102_zcu104_kv260-r2.0.0.tar.gz
    checksum: c9bf16de1618980a6fade8c6ea82efbc
  - name: centerpoint_0_pt
    type: xmodel
    board: vck190
    download link: https://www.xilinx.com/bin/public/openDownload?filename=centerpoint_0_pt-vck190-r2.0.0.tar.gz
    checksum: 123de8385c035a468f85f65e7930d433
  - name: centerpoint_1_pt
    type: xmodel
    board: vck190
    download link: https://www.xilinx.com/bin/public/openDownload?filename=centerpoint_1_pt-vck190-r2.0.0.tar.gz
    checksum: f60c9411d959d84df78add28fd2c1060
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE

# 2021/quals/kctf/challenge-templates/web/challenge.yaml
apiVersion: kctf.dev/v1
kind: Challenge
metadata:
  name: apache-others
spec:
  deployed: true
  powDifficultySeconds: 0
  network:
    public: false
    ports:
      - protocol: "HTTPS"
        targetPort: 1337
  healthcheck:
    # TIP: disable the healthcheck during development
    enabled: true
  horizontalPodAutoscalerSpec:
    maxReplicas: 10
    minReplicas: 2
    targetCPUUtilizationPercentage: 80
  podTemplate:
    template:
      spec:
        containers:
          - name: 'challenge'
            resources:
              requests:
                memory: "8Gi"
                cpu: "4000m"

# examples/mydc7z015/default/config.yml
---
name: default
board: boards/mydc7z015

cores:
  - fpga/cores/axi_ctl_register_v1_0
  - fpga/cores/axi_sts_register_v1_0
  - fpga/cores/dna_reader_v1_0

memory:
  - name: control
    offset: '0x60000000'
    range: 4K
  - name: status
    offset: '0x50000000'
    range: 4K
  - name: xadc
    offset: '0x43C00000'
    range: 64K

control_registers:
  - led

status_registers:
  - forty_two

parameters:
  fclk0: 50000000 # FPGA clock speed in Hz

xdc:
  - ./constraints.xdc

drivers:
  - server/drivers/common.hpp
  - ./monitor.hpp

web:
  - ./web/index.html
  - web/koheron.ts
  - ./web/monitor.ts
  - ./web/app.ts
  - web/main.css

on: [push, pull_request]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1
      - name: Lint Verilog sources with Verilator
        uses: librecores/ci-fusesoc-action@master
        with:
          command: 'run'
          core: 'openhw:cv32e40p:core'
          target: 'lint'
          tool: 'verilator'
          pre-run-command: |
            pip3 install git+https://github.com/olofk/fusesoc@master
            make

# From repo captainko/vim-matchup
name: Linting and style checking

on: [push, pull_request]

jobs:
  luacheck:
    name: Luacheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Prepare
        run: |
          sudo apt-get update
          sudo add-apt-repository universe
          sudo apt install luarocks -y
          sudo luarocks install luacheck
      - name: Run Luacheck
        run: sudo ./test/scripts/style-check.sh

  vint:
    name: Vint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Install python
        uses: actions/setup-python@v2
      - name: Install vint
        run: |
          python -m pip install --upgrade pip
          pip install vim-vint
      - name: Lint with vint
        run: |
          vint plugin autoload

name: Package

on: [push]

jobs:
  package:
    name: Package
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-node@v1
        with:
          node-version: '10'
      - run: npm install
      - run: npm install -g vsce
      - run: vsce package
      - run: mkdir vhdl-ls && cp *.vsix vhdl-ls
      - uses: actions/upload-artifact@v1
        with:
          name: vhdl-ls
          path: vhdl-ls

  publish:
    name: Publish
    if: startsWith(github.ref, 'refs/tags/v')
    needs: package
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v1
        with:
          name: vhdl-ls
          path: vhdl-ls
      - uses: actions/setup-node@v1
        with:
          node-version: '10'
      - name: Print env
        run: |
          echo ${GITHUB_REF:11}
          ls
          ls vhdl-ls
      - run: npm install -g vsce
      - run: vsce publish -p $PUBLISH_TOKEN --packagePath vhdl-ls/vhdl-ls-${GITHUB_REF:11}.vsix
        env:
          PUBLISH_TOKEN: ${{ secrets.PUBLISH_TOKEN }}

# .github/workflows/python-publish.yml
name: Upload 📦 to PyPI

on: push

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up 🐍
        uses: actions/setup-python@v3
        with:
          python-version: 3.9
      - name: Install pypa/build
        run: python -m pip install --upgrade --user build
      - name: Build a binary wheel and a source tarball
        run: python -m build --sdist --wheel --outdir dist/
      - name: Publish 📦 to PyPI
        if: startsWith(github.ref, 'refs/tags')
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.PYPI_API_TOKEN }}
          packages_dir: dist/
          repository: pypi

package:
  name: lowrisc_prim
  description: "lowRISC RTL primitives"
  authors: ["lowRISC Contributors"]

sources:
  # Level 0
  - rtl/prim_util_pkg.sv
  # Level 1
  - rtl/prim_fifo_sync.sv
  - rtl/prim_filter_ctr.sv
  - rtl/prim_intr_hw.sv

# .github/workflows/archive.yml
name: TEST

on:
  push:
  pull_request:

jobs:
  test_run:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v1
        with:
          python-version: '3.7'
          architecture: 'x64'
      - name: Install dependencies
        run: |
          python3 -m pip install --upgrade pip
          pip install numpy matplotlib pandas XlsxWriter pyyaml pyinstaller jinja2
      - name: Install Magic
        run: |
          cd /home/runner/work
          sudo apt-get install m4
          sudo apt-get install tcsh
          sudo apt-get install csh
          sudo apt-get install libx11-dev
          sudo apt-get install tcl-dev tk-dev
          git clone https://github.com/RTimothyEdwards/magic.git
          cd magic
          ./configure
          make
          sudo make install
      - name: Install Netgen
        run: |
          cd /home/runner/work
          git clone https://github.com/RTimothyEdwards/netgen.git
          cd netgen
          ./configure
          make
          sudo make install
      - name: Install OpenLane
        run: |
          cd /home/runner/work
          export PDK_ROOT=/home/runner/work/
          git clone https://github.com/The-OpenROAD-Project/OpenLane.git
          cd OpenLane/
          make
      # - name: Install OpenROAD
      #   run: |
      #     cd /home/runner/work
      #     git clone --recursive https://github.com/The-OpenROAD-Project/OpenROAD.git
      #     cd OpenROAD
      #     sudo ./etc/DependencyInstaller.sh -dev
      #     sudo ./etc/Build.sh
      # - name: PDK installation
      #   run: cd ${GITHUB_WORKSPACE}/ && make pdk
      # - name: Install Klayout
      #   run: |
      #     cd /home/runner/work
      #     git clone https://github.com/KLayout/klayout.git
      #     cd klayout
      #     sudo apt-add-repository ppa:rock-core/qt4
      #     sudo apt-add-repository ppa:ubuntu-toolchain-r/test
      #     sudo apt update
      #     sudo apt install qt4-default
      #     sudo apt-get install gcc-4.7
      #     sudo apt-get install build-essential
      #     ./build.sh -option '-j8' -noruby -without-qt-multimedia -without-qt-xml -without-qt-svg
      # - name: Install PDK
      #   run: |
      #     cd /home/runner/work
      #     git clone https://github.com/RTimothyEdwards/open_pdks.git
      #     cd open_pdks
      #     ./configure --enable-sky130-pdk
      #     make
      #     sudo make install
      #     docker run -v $(pwd):/mnt -w /mnt efabless/openlane sh -c 'make sky130hd_temp'
      - name: Locate gen-design folder
        run: |
          cd /home/runner/work/OpenFASOC/OpenFASOC/generators/temp-sense-gen
          make sky130hd_temp

# From repo mfkiwl/neorv32-setups
name: Implementation

on:
  push:
  pull_request:
  schedule:
    - cron: '0 0 * * 5'
  workflow_dispatch:

jobs:

  Matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.generate.outputs.matrix }}
    steps:
      - name: '🧰 Repository Checkout'
        uses: actions/checkout@v2
      - name: '🔧 Generate examples matrix'
        id: generate
        run: ./.github/generate-job-matrix.py

  All-in-one:
    needs: Matrix
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.Matrix.outputs.matrix) }}
    name: '🛳️ All-in-one | ${{ matrix.board }} · ${{ matrix.design }}'
    steps:
      - name: '🧰 Repository Checkout'
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
          submodules: recursive
      - name: '🚧 Generate ${{ matrix.board }} ${{ matrix.design }} bitstream'
        uses: docker://ghcr.io/stnolting/neorv32/impl
        with:
          args: make -C osflow BOARD=${{ matrix.board }} ${{ matrix.design }}
      - name: '📤 Upload Artifact: ${{ matrix.board }} ${{ matrix.design }} bitstream and reports'
        uses: actions/upload-artifact@v2
        with:
          name: ${{ matrix.board }}-${{ matrix.design }}
          path: |
            osflow/${{ matrix.bitstream }}
            osflow/${{ matrix.board }}/*-report.txt

  Windows:
    needs: Matrix
    runs-on: windows-latest
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.Matrix.outputs.matrix) }}
    name: '🟦 MINGW64 | ${{ matrix.board }} · ${{ matrix.design }}'
    defaults:
      run:
        shell: msys2 {0}
    steps:
      - name: '🟦 Setup MSYS2'
        uses: msys2/setup-msys2@v2
        with:
          msystem: MINGW64
          update: true
          install: make
          pacboy: >
            yosys:p
            nextpnr:p
            icestorm:p
            prjtrellis:p
      - name: '⚙️ git config'
        run: git config --global core.autocrlf input
        shell: bash
      - name: '🧰 Checkout'
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
          submodules: recursive
      - name: '🚧 Generate ${{ matrix.board }} ${{ matrix.design }} bitstream'
        run: make -C osflow BOARD=${{ matrix.board }} ${{ matrix.design }}

# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0

# Azure template for archiving pipeline step outputs and uploading them.
#
# This template will archive all of $BIN_DIR, and upload it for use by
# downstream jobs using download_artifacts_template.yml.
#
# This template expects that a variable $BUILD_ROOT is set. See
# util/build_consts.sh for more information.

steps:
  - bash: |
      set -e
      test -n "$BUILD_ROOT"
      . util/build_consts.sh

      tar -C "$BUILD_ROOT" \
        -cvf "$BUILD_ROOT/build-bin.tar" \
        "${BIN_DIR#"$BUILD_ROOT/"}"
    displayName: 'Archive step outputs'

  - publish: "$(Build.ArtifactStagingDirectory)/build-bin.tar"
    artifact: ${{ parameters.artifact }}-build-bin
    displayName: 'Upload step outputs'

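The tar command above leans on bash prefix removal, `${BIN_DIR#"$BUILD_ROOT/"}`, to archive `$BIN_DIR` by a path relative to `$BUILD_ROOT`. A minimal sketch of that expansion, with made-up placeholder paths:

```bash
# Hypothetical values, only to show the expansion used by the tar command.
BUILD_ROOT=/tmp/build
BIN_DIR=/tmp/build/build-bin

# ${VAR#pattern} strips the shortest leading match of 'pattern', so the
# archive entry becomes a relative path instead of an absolute one.
echo "${BIN_DIR#"$BUILD_ROOT/"}"   # prints: build-bin

# Combined with tar -C "$BUILD_ROOT", the tarball stores build-bin/...
# relative to the build root, which is what the download template expects.
tar -C "$BUILD_ROOT" -cvf "$BUILD_ROOT/build-bin.tar" "${BIN_DIR#"$BUILD_ROOT/"}"
```
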
name: unipolar-rz

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: '3.x'
      - name: Install hdlmake
        run: pip install -r requirements.txt
      - name: Install ModelSim dependencies
        run: |
          sudo dpkg --add-architecture i386
          sudo apt-get update
          sudo apt-get install lib32z1 lib32stdc++6 libexpat1:i386 libc6:i386 libsm6:i386 libncurses5:i386 libx11-6:i386 zlib1g:i386 libxext6:i386 libxft2:i386
      - name: Cache ModelSim
        uses: actions/cache@v2
        with:
          path: ~/intelFPGA/*
          key: ${{ runner.os }}-modelsim-20.1
      - name: Install ModelSim if not cached
        run: stat $HOME/intelFPGA/20.1/modelsim_ase || (curl 'https://download.altera.com/akdlm/software/acdsinst/20.1std.1/720/ib_installers/ModelSimSetup-20.1.1.720-linux.run' -o ModelSimSetup.run && chmod +x ModelSimSetup.run && ./ModelSimSetup.run --mode unattended --accept_eula 1 && sed -i 's/linux_rh60/linux/g' $HOME/intelFPGA/20.1/modelsim_ase/vco)
      - name: Add ModelSim to PATH
        run: echo "$HOME/intelFPGA/20.1/modelsim_ase/bin" >> $GITHUB_PATH
      - name: SK6805 LED Testbench
        run: cd $GITHUB_WORKSPACE/sim/sk6805_tb/ && hdlmake fetch && hdlmake && make

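The "Install ModelSim if not cached" one-liner above uses the stat-or-install idiom: `stat` succeeds when the cached tree restored by actions/cache exists, so the parenthesized install only runs on a cache miss. The same logic, unpacked (commands copied from the step):

```bash
# Install ModelSim only when the cached tree is absent.
if ! stat "$HOME/intelFPGA/20.1/modelsim_ase" > /dev/null 2>&1; then
  curl 'https://download.altera.com/akdlm/software/acdsinst/20.1std.1/720/ib_installers/ModelSimSetup-20.1.1.720-linux.run' \
       -o ModelSimSetup.run
  chmod +x ModelSimSetup.run
  ./ModelSimSetup.run --mode unattended --accept_eula 1
  # ModelSim's vco launcher probes for RedHat; point it at generic linux.
  sed -i 's/linux_rh60/linux/g' "$HOME/intelFPGA/20.1/modelsim_ase/vco"
fi
```
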
#=========================================================================
# Mentor Calibre GDS Merge -- Child
#=========================================================================
# Merges two GDS files, with:
#
# 1. The child.gds merged as a child cell of design.gds
# 2. The child.gds placed at (coord_x, coord_y) inside design.gds
#
# Author : <NAME>
# Date   : November 5, 2019
#

name: mentor-calibre-gdsmerge-child

#-------------------------------------------------------------------------
# Inputs and Outputs
#-------------------------------------------------------------------------

inputs:
  - adk
  - design.gds
  - child.gds

outputs:
  - design_merged.gds

#-------------------------------------------------------------------------
# Commands
#-------------------------------------------------------------------------

commands:
  # For some reason, Calibre requires this directory to exist
  - mkdir -p $HOME/.calibrewb_workspace/tmp
  # Run the merge
  # Need 'echo |' to stop calibredrv from hanging if the step is backgrounded,
  # which is a known calibredrv bug noted in the manual (see calibr_drv_ref.pdf)
  - echo | calibredrv merge.tcl
  # Get the outputs
  - mkdir -p outputs && cd outputs
  - ln -sf ../design_merged.gds

#-------------------------------------------------------------------------
# Parameters
#-------------------------------------------------------------------------

parameters:
  design_gds: inputs/design.gds
  child_gds: inputs/child.gds
  # coord_x and coord_y
  #
  # Coordinates should be a floating point number (or integer) followed by
  # the character 'u' for microns.
  coord_x: 0.0u
  coord_y: 0.0u
  # flatten the child_gds into the design_gds
  flatten_child: False

#-------------------------------------------------------------------------
# Debug
#-------------------------------------------------------------------------

debug:
  - calibredrv -m design_merged.gds \
      -l inputs/adk/calibre.layerprops

#-------------------------------------------------------------------------
# Assertions
#-------------------------------------------------------------------------

preconditions:
  - assert Tool( 'calibredrv' )
  - assert File( 'inputs/design.gds' )
  #- assert File( 'inputs/child.gds' ) # commented, maybe merging with adk

postconditions:
  - assert File( 'outputs/design_merged.gds' )

  # Duplicate structures
  #
  # GDS can be hierarchical, meaning they have holes where library cells
  # (e.g., stdcell GDS) can be filled in. If library cell names conflict,
  # there is a chance that one definition will overwrite the other and you
  # will see a very weird GDS that may not be functional or DRC clean
  # anymore (e.g., one SRAM macro may now be using another SRAM macro's
  # bitcell array). If a conflict happens unexpectedly here and goes by
  # undetected, it can take days or weeks to debug LVS before finally
  # realizing it was an incorrect GDS merge.
  #
  # Assert here to make sure we detect it early. There is a choice for
  # what to do next for the merged GDS: (1) use one library's version, (2)
  # use the other library's version, (3) rename both and each reference
  # their own version, (4) something else...

  # More naming conflicts
  #
  # The "-mode reportconflictsonly" option will report whether name
  # conflicts exist. The notice looks like this:
  #
  #     Note: cell {cell_name} already exists in foo.gds, conflicts
  #     with {cell_name} from bar.gds.
  #
  # We have to make sure this does not happen.

  - "assert 'WARNING: Ignoring duplicate structure' not in File( 'mflowgen-run.log' )"
  - "assert 'conflicts with' not in File( 'mflowgen-run.log' )"

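The two log assertions above amount to a grep over the step's run log; an equivalent shell check, using exactly the warning strings quoted in the asserts:

```bash
# Fail the step if the Calibre merge log mentions duplicate or
# conflicting structure names (same strings as the postconditions).
if grep -Eq 'WARNING: Ignoring duplicate structure|conflicts with' mflowgen-run.log; then
  echo "GDS merge produced structure-name conflicts; fix before trusting LVS" >&2
  exit 1
fi
```
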
# see docs at https://docs.haskellstack.org/en/stable/yaml_configuration/
name: Aetherling
version: 0.1.0.0
github: "David-Durst/Aetherling/theory/AetherlingSTIR"
license: BSD3
author: "<NAME>"
maintainer: "<EMAIL>"
copyright: "2018 <NAME>"

extra-source-files:
  - protoAetherling/**/*.proto

# Metadata used when publishing your package
# synopsis: Short description of your package
# category: Web

# To avoid duplicated efforts in documentation and dealing with the
# complications of embedding Haddock markup inside cabal files, it is
# common to point users to the README.md file.
description: Please see the README on GitHub at <https://github.com/David-Durst/Aetherling/theory/AetherlingSTIR#readme>

dependencies:
  - base
  - finite-typelits
  - vector-sized
  - ghc-typelits-extra
  - ghc-typelits-natnormalise
  - ghc-typelits-knownnat
  - ghc-typelits-natnormalise
  - singletons
  - split
  - type-iso
  - lens
  - reflection
  - mtl
  - containers
  - reflection
  - util
  - monad-memo
  - arithmoi
  - sbv
  - time
  - temporary
  - process
  - filepath
  - directory
  - random
  - random-shuffle
  - safe
  - filepath
  - timeit
  - deepseq
  - array
  - massiv
  - text
  - proto-lens
  - microlens
  - lens-family
  - bytestring
  - vector
  - Decimal
  - aeson

ghc-options:
  - -O2
  - -fconstraint-solver-iterations=0
  - -fwarn-incomplete-patterns
  - -fplugin GHC.TypeLits.KnownNat.Solver
  - -fplugin GHC.TypeLits.Extra.Solver
  - -fplugin GHC.TypeLits.Normalise
  # - -fbreak-on-exception
  #- -Wall - comeback to this, too many warnings right now to handle
  #- -fplugin GHC.TypeLits.KnownNat.Solver

custom-setup:
  dependencies:
    - base
    - Cabal
    - proto-lens-setup

build-tools: proto-lens-protoc:proto-lens-protoc

library:
  dependencies:
    - base
    - proto-lens-runtime
  source-dirs: src/Core
  default-extensions:
    - DataKinds
    - TypeOperators
    - TypeFamilies
    - FlexibleContexts
    - FlexibleInstances
    - ScopedTypeVariables
    - TypeApplications
    - TypeSynonymInstances
    - PolyKinds
    - LiberalTypeSynonyms
    - UndecidableInstances
    - MultiParamTypeClasses
    - AllowAmbiguousTypes
    - PartialTypeSignatures
    - ExistentialQuantification
    - DeriveFoldable
    - DeriveTraversable
    - GADTs
    - ConstraintKinds
    - Rank2Types
    - InstanceSigs
    - BangPatterns
    - DeriveGeneric
    - DeriveAnyClass
    - OverloadedStrings
  other-modules:
    - Proto.Spacetime
    - Proto.Spacetime_Fields
    - Proto.Sequence
    - Proto.Sequence_Fields

#executables:
#  Aetherling-AppTest-exe:
#    main: Main.hs
#    source-dirs:
#      - src/AppMains/AppTest/
#    ghc-options:
#      - -fprof-auto
#      - -threaded
#      - -with-rtsopts=-N
#    dependencies:
#      - Aetherling
#    default-extensions:
#      - DataKinds
#      - TypeOperators
#      - TypeFamilies
#      - FlexibleContexts
#      - FlexibleInstances
#      - ScopedTypeVariables
#      - TypeApplications
#      - TypeSynonymInstances
#      - PolyKinds
#      - LiberalTypeSynonyms
#      - UndecidableInstances
#      - MultiParamTypeClasses
#      - AllowAmbiguousTypes
#      - PartialTypeSignatures
#      - ExistentialQuantification
#      - DeriveFoldable
#      - DeriveTraversable
#      - GADTs
#      - ConstraintKinds
#      - Rank2Types
#      - InstanceSigs
#  Aetherling-Blur-exe:
#    main: Main.hs
#    source-dirs:
#      - src/Apps/
#      - src/AppMains/Blur/
#    ghc-options:
#      - -rtsopts
#      - -with-rtsopts=-N
#    dependencies:
#      - Aetherling
#      - stb-image-redux
#      - vector
#    default-extensions:
#      - DataKinds
#      - TypeOperators
#      - TypeFamilies
#      - FlexibleContexts
#      - FlexibleInstances
#      - ScopedTypeVariables
#      - TypeApplications
#      - TypeSynonymInstances
#      - PolyKinds
#      - LiberalTypeSynonyms
#      - UndecidableInstances
#      - MultiParamTypeClasses
#      - AllowAmbiguousTypes
#      - PartialTypeSignatures
#      - ExistentialQuantification
#      - DeriveFoldable
#      - DeriveTraversable
#      - GADTs
#      - ConstraintKinds
#      - Rank2Types
#      - InstanceSigs

tests:
  Aetherling-Tests:
    main: Test_Main.hs
    source-dirs:
      - test/
    ghc-options:
      - -threaded
      - -with-rtsopts=-N
    dependencies:
      - Aetherling
      - tasty
      - tasty-hunit
    default-extensions:
      - DataKinds
      - TypeOperators
      - TypeFamilies
      - FlexibleContexts
      - FlexibleInstances
      - ScopedTypeVariables
      - TypeApplications
      - TypeSynonymInstances
      - PolyKinds
      - LiberalTypeSynonyms
      - UndecidableInstances
      - MultiParamTypeClasses
      - AllowAmbiguousTypes
      - PartialTypeSignatures
      - ExistentialQuantification
      - DeriveFoldable
      - DeriveTraversable
      - GADTs
      - ConstraintKinds
      - Rank2Types
      - InstanceSigs

stages:
  - build
  - test

before_script:
  - ./ci/setup.csh

rtl_riscv:
  stage: test
  script:
    - ./ci/rtl-riscv.csh

rtl_sequential:
  stage: test
  script:
    - ./ci/rtl-sequential.csh

rtl_riscv_rvc:
  stage: test
  script:
    - ./ci/rtl-riscv-rvc.csh

rtl_sequential_rvc:
  stage: test
  script:
    - ./ci/rtl-sequential-rvc.csh

package:
  name: zephyr-cosim
  version: 0.0.1
deps:
  - name: tblink-rpc-hdl
    url: https://github.com/tblink-rpc/tblink-rpc-hdl.git
  - name: vte
    url: https://github.com/fvutils/vte.git
  - name: pyelftools
    src: pypi
  - name: pyyaml
    src: pypi
  - name: packages
    src: pypi
dev-deps:
  - name: googletest
    url: https://github.com/google/googletest/archive/refs/tags/release-1.11.0.tar.gz
  - name: tblink-rpc-hdl
    url: https://github.com/tblink-rpc/tblink-rpc-hdl.git
  - name: mkdv
    url: https://github.com/fvutils/mkdv.git
  - name: vte
    url: https://github.com/fvutils/vte.git
  - name: pyelftools
    src: pypi
  - name: pyyaml
    src: pypi
  - name: packages
    src: pypi
  - name: zephyr
    url: https://github.com/zephyr-cosim/zephyr.git
    branch: cosim

# From repo danagsoc/pulp_soc
pulpemu:
  incdirs: [
    ../includes,
    .,
  ]
  defines: [
    '$(PULP_FPGA_SIM_ZYNQ)',
  ]
  targets: [
    xilinx,
  ]
  files: [
    pulpemu_apb_demux.sv,
    pulpemu_clk_gating.sv,
    pulpemu_gpio.sv,
    pulpemu_i2c.sv,
    pulpemu_spi_master.sv,
    pulpemu_spi_slave.sv,
    pulpemu_stdout.sv,
    pulpemu.sv,
    pulpemu_trace.sv,
    pulpemu_uart.sv,
    pulpemu_zynq2pulp_gpio.sv,
    ../../tb/fake_zynq_wrapper.sv,
  ]

package:
  name: icache-intc

sources:
  # Level 0
  - Req_Arb_Node_icache_intc.sv
  - Resp_Arb_Node_icache_intc.sv
  - lint_mux.sv
  # Level 1
  - DistributedArbitrationNetwork_Req_icache_intc.sv
  - DistributedArbitrationNetwork_Resp_icache_intc.sv
  # Level 2
  - RoutingBlock_Req_icache_intc.sv
  - RoutingBlock_2ch_Req_icache_intc.sv
  - RoutingBlock_Resp_icache_intc.sv
  # Level 3
  - icache_intc.sv

package:
  name: wishbone_bfms
  version: None
deps:
  - name: simscripts
    url: http://github.com/mballance/simscripts.git
    type: raw
  - name: fwprotocol-defs
    url: http://github.com/Featherweight-IP/fwprotocol-defs.git
    type: raw
  - name: vlsim
    url: http://github.com/mballance/vlsim.git
    type: python
  - name: cocotb
    type: python
    src: pypi
  - name: pybfms
    url: https://github.com/pybfms/pybfms.git
    type: python
  - name: fltools
    url: https://github.com/fvutils/fltools.git
    type: python
dev-deps:
  - name: simscripts
    url: http://github.com/mballance/simscripts.git
    type: raw
  - name: fwprotocol-defs
    url: http://github.com/Featherweight-IP/fwprotocol-defs.git
    type: raw

# conf/fusesoc-configs/earlgrey-synth.yml
# Copyright (C) 2019-2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC

name: earlgrey
description: Earlgrey design from opentitan
top_module: chip_earlgrey_nexysvideo
tags: earlgrey
path: third_party/cores/opentitan
command: fusesoc --cores-root third_party/cores/opentitan run --flag=fileset_top --target=synth --setup lowrisc:systems:chip_earlgrey_nexysvideo
conf_file: build/lowrisc_systems_chip_earlgrey_nexysvideo_0.1/synth-vivado/lowrisc_systems_chip_earlgrey_nexysvideo_0.1.tcl
test_file: earlgrey-synth.sv
timeout: 360
compatible-runners: yosys-uhdm vanilla-yosys-uhdm-plugin yosys yosys-sv zachjs-sv2v icarus moore moore-parse odin sv-parser tree-sitter-verilog verible verible_extractor Surelog slang-parse
type: parsing elaboration

{% set filename = "{{name}}.yaml" %}
identifier: {{name}}
name: Native 32-bit POSIX port
type: native
arch: posix
ram: 65536
flash: 65536
toolchain:
  - host
  - llvm
supported:
  - eeprom
  - netif:eth
  - usb_device
  - adc
  - i2c
  - spi
testing:
  default: true

name: Nightly Build

on:
  push:
    branches: [ main ]
    paths:
      - "neorv32/**"
      - "osflow/**"
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Build Neorv32
        uses: docker://gcr.io/hdl-containers/impl/icestorm
        with:
          args: make GHDL_MODULE="-m ghdl" -C osflow/boards/iceduino all
      - name: Create report
        run: |
          cat osflow/boards/iceduino/yosys-report.txt >> report.txt
          cat osflow/boards/iceduino/nextpnr-report.txt >> report.txt
      - name: Create release
        uses: "marvinpinto/action-automatic-releases@latest"
        with:
          repo_token: "${{ secrets.GITHUB_TOKEN }}"
          automatic_release_tag: "nightly"
          prerelease: true
          title: "Nightly Build"
          files: |
            report.txt
            osflow/boards/iceduino/iceduino_impl.bin

variables:
  GIT_STRATEGY: fetch
  GIT_SUBMODULE_STRATEGY: none

stages:
  - prereq
  - check
  - test-short
  - test-long

#pass:
#  script:
#    - echo "Passing job 1"
#    - exit 0

#fail:
#  script:
#    - echo "Failing job 2"
#    - exit 1

.job_template: &job_definition
  only:
    - master
    - dev
    - fe_dev
    - be_dev
    - me_dev
    - top_dev
    - sw_dev
  before_script:
    - echo "Updating libraries"
    - make update_libs
    - echo "Making CCE ucode"
    - make ucode > make_ucode.log
  artifacts:
    when: always
    paths:
      - "*.log"
      - "bp_fe/syn/reports/"
      - "bp_be/syn/reports/"
      - "bp_me/syn/reports/"
      - "bp_top/syn/reports/"
  cache: &global_cache
    key: $CI_COMMIT_REF_SLUG
    paths:
      - $CI_PROJECT_DIR/external/
    policy: pull

update-cache:
  <<: *job_definition
  when: manual
  stage: prereq
  tags:
    - bsg
  script:
    - make tools -j $CI_CORES > make_tools.log
    - make tidy_tools > make_tidy_tools.log
    - make progs > make_progs.log
  cache:
    key: $CI_COMMIT_REF_SLUG
    paths:
      - $CI_PROJECT_DIR/external/
      - $CI_PROJECT_DIR/bp_common/test/mem/
    policy: push

check-design:
  <<: *job_definition
  stage: check
  tags:
    - dc
  script:
    - $CI_PROJECT_DIR/ci/regress.sh check_design.syn bp_top

lint-verilator:
  <<: *job_definition
  stage: check
  tags:
    - verilator
  script:
    - $CI_PROJECT_DIR/ci/regress.sh lint.sc bp_top

me-regress-verilator:
  <<: *job_definition
  stage: test-short
  tags:
    - verilator
  script:
    - $CI_PROJECT_DIR/ci/regress.sh regress.me.sc bp_me

me-regress-vcs:
  <<: *job_definition
  stage: test-short
  tags:
    - vcs
  script:
    - $CI_PROJECT_DIR/ci/regress.sh regress.me.v bp_me

top-riscv-verilator:
  <<: *job_definition
  stage: test-short
  tags:
    - verilator
  script:
    - make -C bp_top/syn clean build.sc
    - $CI_PROJECT_DIR/ci/regress.sh regress_riscv.sc bp_top

top-coremark-verilator:
  <<: *job_definition
  when: manual
  stage: test-long
  tags:
    - verilator
  script:
    - make -C bp_top/syn clean build.sc
    - $CI_PROJECT_DIR/ci/regress.sh "sim.sc PROG=coremark" bp_top

# Disabled because it's too long running
# TODO: Investigate why
top-beebs-verilator:
  <<: *job_definition
  when: manual
  stage: test-long
  tags:
    - verilator
  script:
    - make -C bp_top/syn clean build.sc
    - $CI_PROJECT_DIR/ci/regress.sh regress_beebs.sc bp_top

top-mc-verilator:
  <<: *job_definition
  stage: test-short
  tags:
    - verilator
  script:
    - make -C bp_top/syn clean build.sc CFG=e_bp_single_core_cfg
    - make -C bp_top/syn build.sc CFG=e_bp_dual_core_cfg
    - make -C bp_top/syn build.sc CFG=e_bp_quad_core_cfg
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_1.sc CFG=e_bp_single_core_cfg" bp_top
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_2.sc CFG=e_bp_dual_core_cfg" bp_top
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_4.sc CFG=e_bp_quad_core_cfg" bp_top

lint-vcs:
  <<: *job_definition
  stage: check
  tags:
    - vcs
  script:
    - $CI_PROJECT_DIR/ci/regress.sh lint.v bp_top

top-riscv-tests-vcs:
  <<: *job_definition
  stage: test-short
  tags:
    - vcs
  script:
    - make -C bp_top/syn clean build.v
    - $CI_PROJECT_DIR/ci/regress.sh regress_riscv.v bp_top

top-coremark-vcs:
  <<: *job_definition
  stage: test-long
  tags:
    - vcs
  script:
    - make -C bp_top/syn clean build.v
    - $CI_PROJECT_DIR/ci/regress.sh "sim.v PROG=coremark" bp_top

top-beebs-vcs:
  <<: *job_definition
  stage: test-long
  tags:
    - vcs
  script:
    - make -C bp_top/syn clean build.v
    - $CI_PROJECT_DIR/ci/regress.sh regress_beebs.v bp_top

top-mc-vcs:
  <<: *job_definition
  stage: test-short
  tags:
    - vcs
  script:
    - make -C bp_top/syn clean build.v CFG=e_bp_single_core_cfg
    - make -C bp_top/syn build.v CFG=e_bp_dual_core_cfg
    - make -C bp_top/syn build.v CFG=e_bp_quad_core_cfg
    - make -C bp_top/syn build.v CFG=e_bp_oct_core_cfg
    - make -C bp_top/syn build.v CFG=e_bp_sexta_core_cfg
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_1.v CFG=e_bp_single_core_cfg" bp_top
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_2.v CFG=e_bp_dual_core_cfg" bp_top
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_4.v CFG=e_bp_quad_core_cfg" bp_top
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_8.v CFG=e_bp_oct_core_cfg" bp_top
    - $CI_PROJECT_DIR/ci/regress.sh "mc_sanity_16.v CFG=e_bp_sexta_core_cfg" bp_top

################################################################
#
# Copyright (c) #YEAR# #LICENSOR#. All rights reserved.
#
# The information and source code contained herein is the
# property of #LICENSOR#, and may not be disclosed or
# reproduced in whole or in part without explicit written
# authorization from #LICENSOR#.
#
# * Filename   : delay.ifc.yaml
# * Author     : <NAME> (<EMAIL>)
# * Description: SV template for a delay cell
#
# * Note       :
#   - Datatype can be one of the following: [pwl, real, logic]
#   - Array is not allowed for in/out pins
#
# * Todo       :
#   -
#
# * Revision   :
#   - 00/00/00 :
#
################################################################

module_name: delay
description: Interface for a delay cell

pin:
  in:
    name: in
    description: input signal
    direction: input
    datatype: pwl
    vectorsize: 0
  out:
    name: out
    description: output signal
    direction: output
    datatype: pwl
    vectorsize: 0

metric: # optional behaviors to be incorporated in a model

modelparam: # model parameters

testparam: # test parameters

# .github/workflows/ci_ubuntu.yaml
name: Ubuntu

on: [push]

jobs:

  lint-code:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - run: sudo apt install pylint
      - run: cd test; ./lint.sh

  ci-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - run: sudo apt install iverilog
      - run: iverilog -V
      - run: sudo apt install verilator
      - run: verilator -V
      - run: cd test; ./regression.sh
sudo: false
dist: bionic

services:
  - docker

env:
  - PATH=$PATH:$(pwd)/riscv/bin

# Install dependencies
addons:
  apt:
    packages:
      - doxygen

script:
  # Generate documentation (modify the doxygen script for Travis CI first)
  - sed -i 's/$(PWD)\/../$(TRAVIS_BUILD_DIR)/g' docs/doxygen_makefile_sw
  - doxygen docs/doxygen_makefile_sw
  # Get simulator tools
  - docker pull ghdl/ghdl:buster-gcc-8.3.0
  # Get toolchain and run SW check
  - /bin/bash -c "chmod u+x .ci/install.sh && .ci/install.sh"
  - /bin/bash -c "chmod u+x .ci/sw_check.sh && .ci/sw_check.sh"
  # HW check
  - >
    docker run -t -v `pwd`:/mnt/data ghdl/ghdl:buster-gcc-8.3.0
    /bin/bash -c "chmod u+x /mnt/data/.ci/hw_check.sh && /mnt/data/.ci/hw_check.sh"

# Deploy documentation
deploy:
  provider: pages
  skip_cleanup: true
  local_dir: doxygen_build/html
  github_token: $GH_REPO_TOKEN
  on:
    branch: master

notifications:
  email:
    on_success: never
    on_failure: always
language: generic

script:
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project antonkrug/verilator-lcov-slim:da1cef5 bash -c "cd /project && npm install"
  - mkdir -p ${TRAVIS_BUILD_DIR}/images
  - chmod a+rw ${TRAVIS_BUILD_DIR}/images
  - rm -f ${TRAVIS_BUILD_DIR}/images/*
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project wernight/phantomjs:2.1.1 bash -c "/project/images_src/generate_images.sh"
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project antonkrug/verilator-lcov-slim:da1cef5 bash -c "cd /project && npm run-script lint"
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project antonkrug/verilator-lcov-slim:da1cef5 bash -c "cd /project && npm run-script build"
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project antonkrug/verilator-lcov-slim:da1cef5 bash -c "cd /project && npm run-script run"
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project --env-file <(env) antonkrug/verilator-lcov-slim:da1cef5 bash -c "cd /project && npm run-script coverage:coveralls"
  - docker run --rm -v ${TRAVIS_BUILD_DIR}:/project antonkrug/verilator-lcov-slim:da1cef5 bash -c "cd /project && npm run-script coverage:html-report"
---
kind: pipeline
name: default

platform:
  os: linux
  arch: amd64

steps:
  - name: test
    image: alpine:3.8
    commands:
      - apk add curl
      - sleep 45
      - curl http://database:9200

services:
  - name: database
    image: elasticsearch:5-alpine
    ports:
      - 9200
name: svlint
version: &version v0.5.4
summary: SystemVerilog linter
description: |
  A lint checker supporting SystemVerilog (IEEE Std. 1800-2017)
base: core18
license: MIT
confinement: strict

architectures:
  - build-on: amd64
  - build-on: i386
  - build-on: ppc64el
  - build-on: arm64
  - build-on: armhf

apps:
  svlint:
    command: svlint
    plugs:
      - home
      - removable-media

parts:
  svlint:
    source: https://github.com/dalance/svlint.git
    source-tag: *version
    plugin: rust
    stage-packages:
      - libc6
      - libgcc1
      - libstdc++6
      - zlib1g
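# A possible local build-and-install sketch for this snap (assumes snapcraft
# and snapd are installed; the exact artifact name depends on the host arch):
#
#   snapcraft
#   sudo snap install ./svlint_v0.5.4_amd64.snap --dangerous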
# ************************************************************************
#  @author:     <NAME>
#  @copyright:  Copyright 2021
#  @credits:    AKAE
#
#  @license:    BSDv3
#  @maintainer: <NAME>
#  @email:      <EMAIL>
#
#  @file:       unittest.yml
#  @date:       2021-08-25
#
#  @brief:      runs test and deploy
#
# ************************************************************************

name: Unittest

on:
  push:
    paths-ignore:
      - 'doc/**'
      - 'README.md'

jobs:
  test:
    runs-on: ubuntu-latest
    env:
      GHDL_OPTS: "--std=93c --ieee=synopsys --time-resolution=ps --workdir=./sim/work"
    steps:
      - uses: actions/checkout@v1
      - uses: ghdl/setup-ghdl-ci@nightly
        with:
          backend: mcode
      - name: Prepare
        run: |
          ghdl --version
          mkdir -p ./sim/work
      - name: generic_spi_master_inp_filter_tb
        run: |
          ghdl -a ${GHDL_OPTS} ./hdl/generic_spi_master_inp_filter.vhd
          ghdl -a ${GHDL_OPTS} ./tb/generic_spi_master_inp_filter_tb.vhd
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_inp_filter_tb -gDO_ALL_TEST=True) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_inp_filter"; exit 1; fi)
      - name: generic_spi_master_mode_0_3_tb
        run: |
          ghdl -a ${GHDL_OPTS} ./hdl/generic_spi_master_inp_filter.vhd
          ghdl -a ${GHDL_OPTS} ./hdl/generic_spi_master.vhd
          ghdl -a ${GHDL_OPTS} ./tb/generic_spi_master_mode_0_3_tb.vhd
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=1 -gSPI_MODE=0) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=2 -gSPI_MODE=0) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=3 -gSPI_MODE=0) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=1 -gSPI_MODE=1) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=2 -gSPI_MODE=1) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=3 -gSPI_MODE=1) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=1 -gSPI_MODE=2) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=2 -gSPI_MODE=2) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=3 -gSPI_MODE=2) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=1 -gSPI_MODE=3) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=2 -gSPI_MODE=3) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb -gDO_ALL_TEST=True -gCLK_DIV2=3 -gSPI_MODE=3) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; fi)
      - name: generic_spi_master_HC594_HC165_tb
        run: |
          ghdl -a ${GHDL_OPTS} ./hdl/generic_spi_master_inp_filter.vhd
          ghdl -a ${GHDL_OPTS} ./hdl/generic_spi_master.vhd
          ghdl -a ${GHDL_OPTS} ./tb/HC165.vhd
          ghdl -a ${GHDL_OPTS} ./tb/HC594.vhd
          ghdl -a ${GHDL_OPTS} ./tb/generic_spi_master_HC594_HC165_tb.vhd
          ((ghdl -r ${GHDL_OPTS} generic_spi_master_HC594_HC165_tb -gDO_ALL_TEST=True) || if [ $? -ne 0 ]; then echo "[ FAIL ] generic_spi_master_HC594_HC165_tb"; exit 1; fi)
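# The twelve generic sweeps in generic_spi_master_mode_0_3_tb above could be
# expressed as a shell loop; a behaviourally equivalent sketch, not part of
# the original workflow:
#
#   for mode in 0 1 2 3; do
#     for div in 1 2 3; do
#       ghdl -r ${GHDL_OPTS} generic_spi_master_mode_0_3_tb \
#            -gDO_ALL_TEST=True -gCLK_DIV2=${div} -gSPI_MODE=${mode} \
#         || { echo "[ FAIL ] generic_spi_master_mode_0_3_tb"; exit 1; }
#     done
#   done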
---
algorithm:
  class: Spea2
  population_size: 300
  max_archive_size: 200
  duplicate_elimination: false
  shorten_archive_individual: true
  probabilities:
    crossover: 0.5
    mutation: 0.01
    injection: 0.9

init:
  method: ramped # grow or full or ramped
  sensible_depth: 7
inject:
  method: grow # grow or full or random
  sensible_depth: 7

termination:
  max_steps: 1000
  on_individual: stopping_condition

grammar:
  class: Abnf::File
  filename: sample/toy_regression/grammar.abnf

mapper:
  class: DepthFirst
  track_support_on: true

selection:
  class: Tournament
  tournament_size: 2
selection_rank: # do not change
  class: Spea2Ranking

crossover:
  class: CrossoverLHS
mutation:
  class: MutationNodal

store:
  class: Store
  filename: ./toy_spea2_lhsc.store
report:
  class: ToyReport
  require: sample/toy_regression/toy_report.rb
individual:
  class: ToyIndividualMOWeak
  require: sample/toy_regression/toy_individual.rb
  shorten_chromozome: false
# Check external services
- hosts: test_orchestrator
  any_errors_fatal: true
  tasks:
    - name: check if Kafka cluster is available
      command: "{{ test_orchestrator_python_bin }} ./check-kafka-available.py {{ integration_test_kafka_bootstrap_servers }}"
      args:
        chdir: "{{ test_orchestrator_script_dir }}"
stages:
  - test_riscy
  - test_riscy_fp
  - test_zero
  - test_micro

rtl_riscy:
  stage: test_riscy
  script:
    - ./ci/rtl-basic.csh
  before_script:
    - ./ci/setup_riscy.csh
  when: always

rtl_riscy_sequential:
  stage: test_riscy
  script:
    - ./ci/rtl-sequential.csh
  before_script:
    - ./ci/setup_riscy.csh
  when: always

rtl_riscy_rvc:
  stage: test_riscy
  script:
    - ./ci/rtl-basic-rvc.csh
  before_script:
    - ./ci/setup_riscy.csh
  when: always

rtl_riscy_sequential_rvc:
  stage: test_riscy
  script:
    - ./ci/rtl-sequential-rvc.csh
  before_script:
    - ./ci/setup_riscy.csh
  when: always

rtl_riscy_fp:
  stage: test_riscy_fp
  script:
    - ./ci/rtl-basic.csh
  before_script:
    - ./ci/setup_riscy_fp.csh
  when: always

rtl_riscy_fp_sequential:
  stage: test_riscy_fp
  script:
    - ./ci/rtl-sequential.csh
  before_script:
    - ./ci/setup_riscy_fp.csh
  when: always

rtl_riscy_fp_ml:
  stage: test_riscy_fp
  script:
    - ./ci/rtl-ml.csh
  before_script:
    - ./ci/setup_riscy_fp.csh
  when: always

rtl_riscy_fp_rvc:
  stage: test_riscy_fp
  script:
    - ./ci/rtl-basic-rvc.csh
  before_script:
    - ./ci/setup_riscy_fp.csh
  when: always

rtl_riscy_fp_sequential_rvc:
  stage: test_riscy_fp
  script:
    - ./ci/rtl-sequential-rvc.csh
  before_script:
    - ./ci/setup_riscy_fp.csh
  when: always

rtl_riscy_fp_ml_rvc:
  stage: test_riscy_fp
  script:
    - ./ci/rtl-ml-rvc.csh
  before_script:
    - ./ci/setup_riscy_fp.csh
  when: always

rtl_zero:
  stage: test_zero
  script:
    - ./ci/rtl-basic.csh
  before_script:
    - ./ci/setup_zero.csh
  when: always

rtl_zero_sequential:
  stage: test_zero
  script:
    - ./ci/rtl-sequential.csh
  before_script:
    - ./ci/setup_zero.csh
  when: always

rtl_zero_rvc:
  stage: test_zero
  script:
    - ./ci/rtl-basic-rvc.csh
  before_script:
    - ./ci/setup_zero.csh
  when: always

rtl_zero_sequential_rvc:
  stage: test_zero
  script:
    - ./ci/rtl-sequential-rvc.csh
  before_script:
    - ./ci/setup_zero.csh
  when: always

rtl_micro:
  stage: test_micro
  script:
    - ./ci/rtl-basic.csh
  before_script:
    - ./ci/setup_micro.csh
  when: always

rtl_micro_sequential:
  stage: test_micro
  script:
    - ./ci/rtl-sequential.csh
  before_script:
    - ./ci/setup_micro.csh
  when: always

rtl_micro_rvc:
  stage: test_micro
  script:
    - ./ci/rtl-basic-rvc.csh
  before_script:
    - ./ci/setup_micro.csh
  when: always

rtl_micro_sequential_rvc:
  stage: test_micro
  script:
    - ./ci/rtl-sequential-rvc.csh
  before_script:
    - ./ci/setup_micro.csh
  when: always
stages:
  - test
  - gen_coverage
  - deploy

.setup:
  before_script:
    - apk update
    - apk add curl git bash make
    - apk add build-base neovim python3
    - curl -sSL https://bootstrap.pypa.io/get-pip.py | python3
    - git clone --depth=1 https://github.com/junegunn/vader.vim.git ./test/vader/vader.vim

test-latest:
  stage: test
  extends: .setup
  image:
    name: thinca/vim:latest-full
    entrypoint: [""]
  variables:
    MYVIM: vim -T dumb --not-a-term -n
  script: cd ./test/new && make -j1
  artifacts:
    paths:
      - ./test/new/cov.tmp/

test-8.0.1575:
  stage: test
  extends: .setup
  image:
    name: thinca/vim:v8.0.1575-full
    entrypoint: [""]
  variables:
    MYVIM: vim -T dumb --not-a-term -n
  script: cd ./test/new && make -j1
  artifacts:
    paths:
      - ./test/new/cov.tmp/

test-7.4.2273:
  stage: test
  extends: .setup
  image:
    name: thinca/vim:v7.4.2273-full
    entrypoint: [""]
  variables:
    MYVIM: vim -T dumb --not-a-term -n
  script: cd ./test/new && make -j1
  artifacts:
    paths:
      - ./test/new/cov.tmp/

test-neovim:
  stage: test
  extends: .setup
  image: alpine:latest
  script: cd ./test/new && make -j1
  artifacts:
    paths:
      - ./test/new/cov.tmp/

test-neovim-treesitter:
  stage: test
  extends: .setup
  image: alpine:edge
  variables:
    TESTS_ENABLE_TREESITTER: 1
  script:
    - git clone --depth=1 https://github.com/nvim-treesitter/nvim-treesitter.git test/vader/plugged/nvim-treesitter
    - cd ./test/new
    - nvim --headless -Nu common/bootstrap.vim -c 'TSInstallSync! python' -c 'TSInstallSync! ruby' -c 'q'
    - make -j1
  artifacts:
    paths:
      - ./test/new/cov.tmp/

coverage:
  stage: gen_coverage
  extends: .setup
  image: alpine:latest
  script: cd ./test/new && make -j1 coverage
  artifacts:
    reports:
      cobertura: ./test/new/coverage.xml
    paths:
      - ./test/new/htmlcov

pages:
  stage: deploy
  image: ruby:2.3
  script:
    - mkdir public
    - mv ./test/new/htmlcov public/
  artifacts:
    paths:
      - public
    expire_in: 30 days
  dependencies:
    - coverage
  only:
    - master
    - dev-test-ts-split
package:
  name: axi2apb
  authors: [ "<NAME> <<EMAIL>>", "<NAME> <<EMAIL>>" ]

dependencies:
  axi: { git: "https://github.com/pulp-platform/axi.git", version: 0.4.5 }
  apb: { git: "https://github.com/pulp-platform/apb.git", version: 0.1.0 }

sources:
  - src/axi2apb.sv
  - src/axi2apb_64_32.sv
  - src/axi2apb_wrap.sv
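# This is a Bender manifest (Bender.yml). Assuming the pulp-platform Bender
# dependency manager is installed, a typical flow would be:
#
#   bender update     # resolve and fetch the axi/apb dependencies
#   bender sources    # emit the resolved source-file list for downstream tools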
# This YAML file is used to generate the instrumentation
---
# Parameters to define the architecture (see full description of parameters in the docs folder)
N: 8           # Input vector width
M: 4           # Number of range filters in filter unit
IB_DEPTH: 8    # Input buffer depth
FUVRF_SIZE: 4  # Size of FUVRF in M*elements
VVVRF_SIZE: 8  # Size of VVVRF in N*elements
TB_SIZE: 64    # Size of Trace Buffer in N*elements
MAX_CHAINS: 8  # Maximum number of chains of the firmware

# Define how building blocks are connected
BUILDING_BLOCKS:
  - InputBuffer
  - FilterReduceUnit
  - VectorVectorALU
  - VectorScalarReduce
  - DataPacker
  - TraceBuffer

# Define the initial firmware (see complete list at src/firmware/firmware.py)
FIRM: distribution
# .github/workflows/build.yml
# This workflow renders all the Markdown notes into an HTML website.
name: 'Build the website'

on:
  push:
    branches: [master]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout `master`'
        uses: actions/checkout@v2
      - name: 'Checkout `gh-pages` into a separate directory'
        uses: actions/checkout@v2
        with:
          path: 'dist'
          ref: 'gh-pages'
      - name: 'Render the website'
        uses: 'jerry-sky/[email protected]'
      - name: 'Get current time'
        uses: gerred/actions/current-time@master
        id: current-time
      - name: 'Push the changes'
        uses: EndBug/[email protected]
        with:
          message: "deployed on ${{ steps.current-time.outputs.time }}"
          ref: 'gh-pages'
          cwd: './dist/'
          add: '*'
# Adapted from Garnet
name: openram-gen-sram

commands:
  - |
    ### Option 1 (slow): Run OpenRAM
    ## generate config file for OpenRAM
    # python gen_config.py
    ## run OpenRAM
    # python $OPENRAM_HOME/openram.py myconfig
    ## fix metal names in LEF file
    # cd temp
    # sed -i 's/LAYER\s\+m1/LAYER metal1/g' *.lef
    # sed -i 's/LAYER\s\+m2/LAYER metal2/g' *.lef
    # sed -i 's/LAYER\s\+m3/LAYER metal3/g' *.lef
    # sed -i 's/LAYER\s\+m4/LAYER metal4/g' *.lef
    # cd ..

    ### Option 2 (fast): Download pre-compiled SRAM
    # Download tarball from GitHub
    wget https://github.com/StanfordVLSI/dragonphy2/releases/download/v0.0.3/sram_144_1024_freepdk45.tar.gz
    # Untar
    tar xzvf sram_144_1024_freepdk45.tar.gz
    # Rename folder to match output of OpenRAM
    mv sram_144_1024_freepdk45 temp
    # Link the outputs needed for mflowgen
    mkdir -p outputs
    cd outputs
    ln -s ../temp/*.lib sram_tt.lib
    ln -s ../temp/*.lef sram.lef
    ln -s ../temp/*.gds sram.gds
    ln -s ../temp/*.sp sram.spi
    cd ..
    # run script to generate a *.db file from the *.lib model
    mkdir -p build
    cd build
    dc_shell-xg-t -f ../generate_db.tcl
    cd ..

parameters:
  sram_word_size: 144
  sram_num_words: 1024
  sram_tech_name: freepdk45
  sram_output_path: temp

outputs:
  - sram_tt.lib
  - sram.lef
  - sram.gds
  - sram.spi
  - sram_tt.db
apiVersion: "autoscaling/v1" kind: "HorizontalPodAutoscaler" metadata: name: "chal" #spec: # maxReplicas: 3
name: mc-gen-sram-small

commands:
  - bash gen_srams.sh

outputs:
  - sram_small.v
  - sram_small_pwr.v
  - sram_small.lef
  - sram_small_tt.lib
  - sram_small_tt.db
  - sram_small.gds
  - sram_small.spi

parameters:
  sram_word_size: 64
  sram_num_words: 256
  sram_mux_size: 4 # TODO: check this
  sram_corner: "tt0p8v25c"
  sram_partial_write: True
name: vitis-ai-optimizer_pytorch
channels:
  - pytorch
  - anaconda
dependencies:
  - python=3.6
  - vai_optimizer_pytorch_gpu
# setup/docker/dockerfiles/gpu_conda/vitis-ai-optimizer_tensorflow.yml
name: vitis-ai-optimizer_tensorflow
channels:
  - conda-forge
  - anaconda
dependencies:
  - python=3.6
  - vai_optimizer_tensorflow_gpu
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0

mtime:
  implemented: False
nmi:
  address: 0x800000FC # trap vec (mtvec base) + 0x7C
reset:
  address: 0x80000080 # boot address + 0x80
# Application dependencies

require:
    - play
    - play -> secure
    - play -> less 0.3.compatibility
    - play -> coffee 1.0
    - play -> postmark 1.0
    - ajaxupload -> ajaxupload 0.1

repositories:
    - frost:
        type: http
        artifact: "http://frostdigital.github.com/module-repo/[module]-[revision].zip"
        contains:
            - ajaxupload
---
modules:
  - identifier: TestModule
    parameters: []
    ports:
      - identifier: d
        direction: Output
        datakind: Variable
        datatype: Class
        classid: my_enum_t
        nettype: ~
        signedness: ~
      - identifier: e
        direction: Output
        datakind: Variable
        datatype: Class
        classid: my_struct_t
        nettype: ~
        signedness: ~
      - identifier: f
        direction: Output
        datakind: Variable
        datatype: Class
        classid: my_struct_t
        nettype: ~
        signedness: ~
filepath: testcases/sv/ansi_port_output_var_class.sv
packages: []
filespec:
  - vlnv: tblink-rpc-gw:sim:tblink-rpc-cmdproc-tb
out:
  - name: PYTHON_PATHS
    type: pythonPath
    flags: python
  - name: MKDV_VL_SRCS
    type:
      - verilogSource
      - systemVerilogSource
    flags: sv
  - name: MKDV_VL_INCDIRS
    type:
      - verilogSource
      - systemVerilogSource
    flags: sv
    include: True
variables:
  STACK_ROOT: "${CI_PROJECT_DIR}/.stack"

stages:
  - test_build
  - post_build_test
  - deploy

test:
  image: fpco/stack-build:lts-10.3
  stage: test_build
  script:
    - stack setup --system-ghc
    - stack test --system-ghc --coverage --test-arguments "--color always --quickcheck-tests 1000"
    - mkdir -p ./public/coverage
    - cp -r $(stack path --system-ghc --local-hpc-root)/* ./public/coverage
  coverage: '/(\d+)\% expressions used/'
  cache:
    key: test
    paths:
      - .stack
      - .stack-work
  artifacts:
    paths:
      - public/
    expire_in: 1 hour

bench:
  image: fpco/stack-build:lts-10.3
  stage: test_build
  script:
    - stack setup --system-ghc
    - mkdir -p ./public/benchmark
    - stack bench --system-ghc --benchmark-arguments "-o ./public/benchmark/index.html"
  cache:
    key: bench
    paths:
      - .stack
      - .stack-work
  artifacts:
    paths:
      - public/
    expire_in: 1 hour

build-cpp:
  image: fpco/stack-build:lts-10.3
  stage: test_build
  script:
    - stack setup --system-ghc
    - mkdir build
    - stack install htar9-asm:htar9-asm-exe --system-ghc --local-bin-path build
  cache:
    key: build
    paths:
      - .stack
      - .stack-work
  artifacts:
    paths:
      - build/

build-hs:
  image: fpco/stack-build:lts-10.3
  stage: test_build
  script:
    - stack setup --system-ghc
    - mkdir build
    - stack install htar9-asm:htar9-asm-hs-exe --system-ghc --local-bin-path build
  cache:
    key: build
    paths:
      - .stack
      - .stack-work
  artifacts:
    paths:
      - build/

build-isa-pdf:
  image: aergus/latex
  stage: test_build
  script:
    - cd docs
    - latexmk -lualatex -interaction=nonstopmode isa.tex
    - cd ..
    - mkdir public
    - mv ./docs/isa.pdf ./public
  artifacts:
    paths:
      - public/
    expire_in: 1 hour

pages:
  # stage precedence means previous stage artifacts will be downloaded
  stage: deploy
  script: echo "some script to run to appease gitlab CI"
  # TODO: get haddock documenting my code only, not everything
  #- stack haddock htar9-asm --haddock-arguments "--odir=${CI_PROJECT_DIR}/public/docs"
  #- stack ide targets --system-ghc
  #- echo ${CI_COMMIT_REF_SLUG}
  only:
    refs:
      - master
  artifacts:
    paths:
      - public/
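# Note on the `coverage:` regex in the test job above: GitLab scans the job log
# for that pattern, and the HPC summary printed by `stack test --coverage`
# emits a matching line, e.g.:
#
#   87% expressions used (1234/1418)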
package:
  name: udma_i2c
  authors:
    - "<NAME> <<EMAIL>>"
    - "<NAME> <<EMAIL>>"
    - "<NAME> <<EMAIL>>"
    - "<NAME> <<EMAIL>>"
    - "<NAME> <<EMAIL>>"

dependencies:
  udma_core: { git: "<EMAIL>:Alsaqr-platform/udma_core.git", version: 2.0.0 }

sources:
  - include_dirs:
      - rtl
    files:
      # Source files grouped in levels. Files in level 0 have no dependencies on files in this
      # package. Files in level 1 only depend on files in level 0, files in level 2 on files in
      # levels 1 and 0, etc. Files within a level are ordered alphabetically.
      # Level 0
      - rtl/udma_i2c_bus_ctrl.sv
      - rtl/udma_i2c_reg_if.sv
      # Level 1
      - rtl/udma_i2c_control.sv
      # Level 2
      - rtl/udma_i2c_top.sv
# Check integration test results

# Make sure test result directory exists and is empty
- hosts: test_orchestrator
  tasks:
    - name: test results are absent
      file:
        path: "{{ integration_test_result_dir }}"
        state: absent
    - name: test result dir exists
      file:
        path: "{{ integration_test_result_dir }}"
        state: directory

- hosts: all
  tasks:
    - name: save log to file
      become: yes
      shell: "journalctl --no-pager --since='{{ integration_test_start_time.stdout }}' > /home/jenkins/{{ ansible_fqdn }}.log"
    - name: copy log file
      fetch:
        src: "/home/jenkins/{{ ansible_fqdn }}.log"
        dest: "{{ integration_test_result_dir }}/"
        flat: yes
    - name: log file is absent
      become: yes
      file:
        path: "~/{{ ansible_fqdn }}.log"
        state: absent

- hosts: kafka_to_nexus
  any_errors_fatal: true
  tasks:
    - name: copy file from kafka-to-nexus host
      fetch:
        src: "{{ kafka_to_nexus_data_dir }}/{{ integration_test_nexus_file_name }}"
        dest: "{{ integration_test_result_dir }}/"
        flat: yes

- hosts: pipeline_data_generator
  any_errors_fatal: true
  tasks:
    - name: fail test if efu data generators failed
      fail:
        msg: data generator error
      when: efu_generator_result.rc != 0

- hosts: efu
  any_errors_fatal: true
  tasks:
    - name: fail test if efu counters check failed
      fail:
        msg: efu error
      when: efu_pipeline_counters_result.rc != 0

- hosts: test_orchestrator
  any_errors_fatal: true
  tasks:
    - name: check NeXus file contents
      shell: "{{ hdf5_base_dir }}/hdf5/bin/h5ls -r {{ integration_test_result_dir }}/{{ integration_test_nexus_file_name }}"
      register: nexus_file_h5ls_result
      ignore_errors: yes
    - name: print NeXus file h5ls stdout
      debug:
        msg: "{{ nexus_file_h5ls_result.stdout_lines }}"
    - name: print NeXus file h5ls stderr
      debug:
        msg: "{{ nexus_file_h5ls_result.stderr_lines }}"
    - name: check if NeXus file contains expected data
      shell: "{{ test_orchestrator_python_bin }} {{ test_orchestrator_script_dir }}/test-output-file.py {{ integration_test_result_dir }}/{{ integration_test_nexus_file_name }}"
    - name: fail test if NeXus file h5ls failed
      fail:
        msg: playbook failed
      when: nexus_file_h5ls_result.rc != 0
# .github/workflows/ci.yaml
name: AXI-CROSSBAR

on: [push]

jobs:

  lint-code:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - run: brew install verilator
      - run: ./flow.sh lint

  sim-tests:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - run: brew install icarus-verilog
      - run: brew install verilator
      - run: iverilog -V
      - run: verilator -V
      - run: git submodule update --init --recursive
      - run: ./flow.sh sim
### YamlMime:ManagedReference
items:
- uid: alps_.net_api.IOwlThing
  commentId: T:alps_.net_api.IOwlThing
  id: IOwlThing
  parent: alps_.net_api
  children: []
  langs:
  - csharp
  - vb
  name: IOwlThing
  nameWithType: IOwlThing
  fullName: alps_.net_api.IOwlThing
  type: Interface
  source:
    remote:
      path: alps .net api/alps .net api/OwlThing.cs
      branch: master
      repo: http://imi-dev.imi.kit.edu:443/ukerz/alps-.net-api.git
    id: IOwlThing
    path: OwlThing.cs
    startLine: 6
  assemblies:
  - alps.net_api
  namespace: alps_.net_api
  summary: "\nInterface of the Owl thing class\n"
  example: []
  syntax:
    content: public interface IOwlThing
    content.vb: Public Interface IOwlThing
  modifiers.csharp:
  - public
  - interface
  modifiers.vb:
  - Public
  - Interface
references:
- uid: alps_.net_api
  commentId: N:alps_.net_api
  name: alps_.net_api
  nameWithType: alps_.net_api
  fullName: alps_.net_api
name: CI

on:
  push:
  pull_request:
  # trigger a cron job every month
  schedule:
    - cron: '0 0 1 * *'

jobs:
  matrix-gen:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{steps.list_tests.outputs.matrix}}
    steps:
      - uses: actions/checkout@v2
      - id: list_tests
        run: |
          cd core/sim/rtl_sim/src/gap-attacks
          MATRIX=$(ls *.s43 | sed 's:.s43*$::' | jq -cnR '[inputs | select(length>0)]')
          echo $MATRIX
          echo "::set-output name=matrix::$MATRIX"

  bench:
    name: ${{ matrix.target }}
    runs-on: ubuntu-latest
    needs: matrix-gen
    strategy:
      fail-fast: false
      matrix:
        target: ${{ fromJson(needs.matrix-gen.outputs.matrix) }}
        runner: ['./run_gap']
        include:
          - target: ./run_all
            runner: ''
            never-fail: 1
          - target: test-env
            runner: './run_gap'
            never-fail: 1
    steps:
      - name: Check out repository
        uses: actions/checkout@v2
      - name: Install dependencies
        run: sudo apt-get install build-essential cmake iverilog tk binutils-msp430 gcc-msp430 msp430-libc msp430mcu expect-dev verilator -y
      - name: Build sancus-core
        run: mkdir build && cd build && cmake -DNEMESIS_RESISTANT=1 .. && cd ..
      - name: Run test bench
        env:
          EXPECT_FAIL: ${{ github.ref_name == 'mitigations' && matrix.never-fail != 1 }}
        run: |
          cd core/sim/rtl_sim/run/
          ${{ matrix.runner }} ${{ matrix.target }}
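# For reference, the jq pipeline in matrix-gen turns the directory listing into
# the JSON array consumed by fromJson(); e.g. attack1.s43 and attack2.s43 give:
#
#   ["attack1","attack2"]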
apiVersion: minio.f110.dev/v1alpha1
kind: MinIOBucket
metadata:
  name: test
spec:
  selector:
    matchLabels:
      app: minio
  policy: readOnly
  createIndexFile: true
---
apiVersion: minio.f110.dev/v1alpha1
kind: MinIOBucket
metadata:
  name: test2
spec:
  selector:
    matchLabels:
      app: minio
# Copyright 2021 OpenHW Group
# Solderpad Hardware License, Version 2.1, see LICENSE.md for details.
# SPDX-License-Identifier: Apache-2.0 WITH SHL-2.1

# Run all lint checks
name: lint
on: [push, pull_request]

env:
  VERIBLE_VERSION: 0.0-1051-gd4cd328

jobs:
  ##################
  # Verible Format #
  ##################
  format_verilog:
    name: Format Verilog Sources
    # This job runs on Linux (fixed ubuntu version)
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install requirements
        run: pip install -r python-requirements.txt
      - name: Install Verible
        run: |
          set -e
          mkdir -p build/verible
          cd build/verible
          curl -Ls -o verible.tar.gz https://github.com/google/verible/releases/download/v$VERIBLE_VERSION/verible-v$VERIBLE_VERSION-Ubuntu-18.04-bionic-x86_64.tar.gz
          sudo mkdir -p /tools/verible && sudo chmod 777 /tools/verible
          tar -C /tools/verible -xf verible.tar.gz --strip-components=1
          echo "PATH=$PATH:/tools/verible/bin" >> $GITHUB_ENV
      - name: Run Format
        run: |
          util/format-verible
          util/git-diff.py --error-msg "::error ::Found differences, run util/format-verible before committing."

  #####################
  # Vendor Up-to-Date #
  #####################
  # Check that all vendored sources are up-to-date.
  check-vendor:
    name: Vendor Up-to-Date
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install requirements
        run: pip install -r python-requirements.txt
      - name: Re-vendor and diff
        run: |
          find . \
            -name '*.vendor.hjson' \
            | xargs -n1 util/vendor.py --verbose \
            && util/git-diff.py --error-msg "::error ::Found differences, please re-vendor."

  ##################
  # Doc Up-to-Date #
  ##################
  # Check that the documentation is up-to-date.
  doc-up-to-date:
    name: Doc Up-to-Date
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install requirements
        run: |
          pip install -r python-requirements.txt
          pip install -r docs/requirements.txt
      - name: Generate doc and diff
        run: |
          make -C docs doc && util/git-diff.py --error-msg "::error ::Found differences, please re-generate the documentation."

  ####################
  # Format YML Files #
  ####################
  yamlfmt:
    name: YAML Sources
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install requirements
        run: pip install -r python-requirements.txt
      # Check the proper formatting of all Bender.yml
      - name: Check YAML formatting
        run: |
          util/yml-format
          util/git-diff.py --error-msg "::error ::Found differences, run util/yml-format before committing."
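# A quick local sanity check that mirrors the Install Verible step above
# (binary names as shipped in the Verible release tarball):
#
#   export PATH=$PATH:/tools/verible/bin
#   verible-verilog-format --version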
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0

# Ibex configuration files; holds the parameter sets that are tested under CI.
# Each configuration must specify the same set of parameters.

# Two-stage pipeline without additional branch target ALU and 3-cycle multiplier
# (4 cycles for mulh), resulting in 2 stall cycles for mul (3 for mulh)
small-3cmult:
  RV32E                    : False
  RV32M                    : True
  RV32B                    : False
  BranchTargetALU          : False
  WritebackStage           : False
  MultiplierImplementation : "fast"

# ===============================
# * EXPERIMENTAL CONFIGURATIONS *
# ===============================

# Three-stage pipeline with additional branch target ALU and 1-cycle multiplier
# (2 cycles for mulh) so mul does not stall (mulh stalls 1 cycle). This is the
# maximum performance configuration.
experimental-maxperf-1cmult:
  RV32E                    : False
  RV32M                    : True
  RV32B                    : False
  BranchTargetALU          : True
  WritebackStage           : True
  MultiplierImplementation : "single-cycle"

# The maxperf-1cmult config above with the bitmanip extension
experimental-maxperf-bm-1cmult:
  RV32E                    : False
  RV32M                    : True
  RV32B                    : True
  BranchTargetALU          : True
  WritebackStage           : True
  MultiplierImplementation : "single-cycle"
# .travis.yml
language: bash
install:
  - source ./.travis/setup.sh
script:
  - git push <EMAIL>:build "$BRANCH_NAME"
notifications:
  email:
    - <EMAIL>
  irc:
    - "chat.freenode.net#timvideos"
    - "chat.freenode.net#hdmi2usb"
#
# code.yml
# <NAME> <<EMAIL>>
#
# Copyright (C) 2018 ETH Zurich, University of Bologna
# Copyright and related rights are licensed under the Solderpad Hardware
# License, Version 0.51 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
# or agreed to in writing, software, hardware and materials distributed under
# this License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# LOOP0 loop0: for i in range(0,nb_iter)
# LOOP1 unused
# LOOP2 unused
# LOOP3 unused
# LOOP4 unused
# LOOP5 unused

# mnemonics to simplify microcode writing
mnemonics:
  a: 0
  b: 1
  c: 2
  d: 3
  nb_iter: 4
  iter_stride: 5
  one_stride: 6

# actual microcode
code:
  loop0:
    - { op: add, a: a, b: iter_stride } # move to next subset of a
    - { op: add, a: b, b: iter_stride } # move to next subset of b
    - { op: add, a: c, b: one_stride }  # move to next subset of c
    - { op: add, a: d, b: one_stride }  # move to next subset of d
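# A worked expansion of loop0, assuming `op: add` accumulates operand b into
# operand a (an assumption; the exact semantics are defined by the accelerator,
# not by this file): each pass advances pointers a and b by iter_stride and
# pointers c and d by one_stride, so after k of the nb_iter iterations:
#
#   a_k = a_0 + k * iter_stride
#   b_k = b_0 + k * iter_stride
#   c_k = c_0 + k * one_stride
#   d_k = d_0 + k * one_stride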
# ivpm.yaml
package:
  name: tblink-rpc-hdl
  version: 0.0.1
  dev-deps:
    - name: tblink-rpc-core
      url: https://github.com/tblink-rpc/tblink-rpc-core.git
    - name: tblink-rpc-hdl
      url: https://github.com/tblink-rpc/tblink-rpc-hdl.git
    - name: pytblink-rpc
      url: https://github.com/tblink-rpc/pytblink-rpc.git
    - name: mkdv
      url: https://github.com/fvutils/mkdv.git
    - name: glog
      url: https://github.com/google/glog/archive/refs/tags/v0.5.0.tar.gz
    - name: googletest
      url: https://github.com/google/googletest/archive/release-1.10.0.tar.gz
    - name: cocotb
      src: pypi
    - name: cython
      src: pypi
name: lint

on:
  push:
    branches:
      - main

jobs:
  black:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: psf/black@stable
        with:
          options: "--exclude=third_party/"
  isort:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: isort/[email protected]
version: "3.5" services: http: build: context: ./build target: http container_name: de10nano-http ports: - "8080:80" volumes: - type: bind source: ./downloads/ target: /downloads/ # sftp: # image: writl/sftp # container_name: de10nano-sftp # ports: # - "2222:22" # environment: # - USER:"sftp" # #- PASS:"<PASSWORD>"c<PASSWORD>T # volumes: # - type: bind # source: ./downloads/ # target: /data/incoming downloader: build: context: ./build target: downloader image: files-downloader container_name: files-downloader entrypoint: /bin/bash /download_packages.sh volumes: - type: bind source: ./downloads/ target: /downloads/
# basil/HL/keithley_2001.yaml
# Device description for the Keithley 2001 Multimeter.
# Only GPIB is possible with this device!
identifier : KEITHLEY INSTRUMENTS INC.,MODEL 2001

get_current : MEAS:CURR?
get_voltage : MEAS:VOLT?
get_channels : ROUT:CLOSE:STAT?

# If the multi channel switcher card Model 2000-SCAN Scanner Card
# is installed the following works for voltage measurements
channel 1:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@1);:MEAS:VOLT?
channel 2:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@2);:MEAS:VOLT?
channel 3:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@3);:MEAS:VOLT?
channel 4:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@4);:MEAS:VOLT?
channel 5:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@5);:MEAS:VOLT?
channel 6:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@6);:MEAS:VOLT?
channel 7:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@7);:MEAS:VOLT?
channel 8:
    get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@8);:MEAS:VOLT?
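# Reading the per-channel command string (standard SCPI semantics):
# `ROUT:OPEN ALL` opens every relay on the scanner card, `ROUT:CLOS (@n)`
# closes channel n, and `MEAS:VOLT?` triggers and returns one reading, so each
# query measures exactly one channel. Additional channels would follow the same
# pattern, e.g. (hypothetical entry, valid only if the card exposes channel 9):
#
# channel 9:
#     get_voltage : ROUT:OPEN ALL;:ROUT:CLOS (@9);:MEAS:VOLT?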
# src_files.yml
redundancy_cells:
  files: [
    # Level 0
    lowrisc_ecc/prim_secded_13_8_dec.sv,
    lowrisc_ecc/prim_secded_13_8_enc.sv,
    lowrisc_ecc/prim_secded_22_16_dec.sv,
    lowrisc_ecc/prim_secded_22_16_enc.sv,
    lowrisc_ecc/prim_secded_39_32_dec.sv,
    lowrisc_ecc/prim_secded_39_32_enc.sv,
    lowrisc_ecc/prim_secded_72_64_dec.sv,
    lowrisc_ecc/prim_secded_72_64_enc.sv,
    rtl/TMR_voter.sv,
    rtl/TMR_word_voter.sv,
    # Level 1
    rtl/ecc_concat_32_64.sv,
    rtl/ecc_sram_wrap.sv,
    rtl/BUS_enc_dec/AXI_bus_ecc_dec.sv,
    rtl/BUS_enc_dec/AXI_bus_ecc_enc.sv,
    rtl/BUS_enc_dec/hci_core_intf_ecc_dec.sv,
    rtl/BUS_enc_dec/hci_core_intf_ecc_enc.sv,
    rtl/BUS_enc_dec/hci_mem_intf_ecc_dec.sv,
    rtl/BUS_enc_dec/hci_mem_intf_ecc_enc.sv,
    rtl/BUS_enc_dec/PE_XBAR_bus_ecc_dec.sv,
    rtl/BUS_enc_dec/PE_XBAR_bus_ecc_enc.sv,
    rtl/BUS_enc_dec/TCDM_XBAR_bus_ecc_dec.sv,
    rtl/BUS_enc_dec/TCDM_XBAR_bus_ecc_enc.sv,
    rtl/BUS_enc_dec/XBAR_DEMUX_BUS_ecc_dec.sv,
    rtl/BUS_enc_dec/XBAR_DEMUX_BUS_ecc_enc.sv,
    rtl/TMR_voter_detect.sv,
    # Level 2
    rtl/bitwise_TMR_voter.sv,
  ]
name: Regression

on:
  push:
  pull_request:
  schedule:
    - cron: '0 11 * * *'

jobs:
  linux:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Update pip
        run: python -m pip install --upgrade pip
      - name: Install dependencies
        run: sudo apt-get install g++-7 libgmp-dev libmpfr-dev libmpc-dev iverilog
      - name: Run regression test
        run: source regress.sh
        env:
          CC: gcc-7
          CXX: g++-7
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

  mac:
    runs-on: macOS-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Update pip
        run: python -m pip install --upgrade pip
      - name: Install dependencies
        run: |
          brew install icarus-verilog coreutils
      - name: Run regression test
        run: source regress.sh
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

# Windows testing doesn't look like it's going to work, at least for now, because
# some of the packages used by fault don't support Windows.
#  windows:
#    runs-on: windows-latest
#    steps:
#      - name: Checkout
#        uses: actions/checkout@v2
#      - name: Set up Python 3.7
#        uses: actions/setup-python@v2
#        with:
#          python-version: 3.7
#      - name: Install dependencies
#        run: |
#          curl -L https://github.com/sgherbst/anasymod/releases/download/bogus/iverilog-v11-20201123-x64.tar.gz > iverilog-v11-20201123-x64.tar.gz
#          tar xzvf iverilog-v11-20201123-x64.tar.gz
#          echo `realpath iverilog/bin` >> $GITHUB_PATH
#        shell: bash
#      - name: Run regression test
#        run: |
#          source regress.sh
#        env:
#          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
#        shell: bash
name: Run an example
description: Build an FPGA example, then run an example application on it

inputs:
  fpga_path:
    description: Path to the FPGA example relative to the examples/fpga directory
    required: true
    default: ""
  app_path:
    description: Path to the application example relative to the examples/app directory
    required: true
    default: ""
  test:
    description: ID of the test to be run. Currently only one test is supported
    required: true
    default: ""

runs:
  using: composite
  steps:
    - name: Build the FPGA
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/fpga/${{ inputs.fpga_path }}
        popd
    - name: Make project for the application
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/app/${{ inputs.app_path }}
        popd
    - name: Run behavioral simulation of the application
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/app/${{ inputs.app_path }}/tests/${{ inputs.test }} behav
        popd
    - name: Run synthesis for the application
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/app/${{ inputs.app_path }}/app syn
        popd
    - name: Run post-synthesis simulation of the application
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/app/${{ inputs.app_path }}/tests/${{ inputs.test }} postsyn
        popd
    - name: Run the full RTL-to-bitstream flow for the application
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/app/${{ inputs.app_path }}/app
        popd
    - name: Run post-implementation simulation of the application
      shell: bash
      run: |
        pushd ${GITHUB_WORKSPACE}
        bash envscr/run make -C ${PWD}/examples/app/${{ inputs.app_path }}/tests/${{ inputs.test }} postimpl
        popd
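# A sketch of how a workflow job might invoke this composite action (the
# checkout path and input values are hypothetical):
#
#   - uses: ./.github/actions/run-example
#     with:
#       fpga_path: basic
#       app_path: basic
#       test: test_001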
# This is a basic workflow to help you get started with Actions
name: SPI FPGA CI

# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    branches: [ master, dev ]
  pull_request:
    branches: [ master, dev ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  spi_master_sim:
    name: SPI Master Simulation
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Setup GHDL
        # You may pin to the exact commit or the version.
        # uses: ghdl/setup-ghdl-ci@233774d8c0c5021af4c3686ea405013cb1494fd1
        uses: ghdl/setup-ghdl-ci@nightly
        with:
          # Select GHDL backend (mcode, llvm or gcc)
          backend: llvm # optional, default is mcode

      - name: GHDL version check
        run: ghdl --version

      - name: Run Test 1 (CLK_FREQ=50e6, SPI_FREQ=1e6, WORD_SIZE=8)
        run: |
          cd ./sim/
          sh ./spi_master_tb_ghdl_setup.sh
          ghdl -r SPI_MASTER_TB -gCLK_FREQ=50e6 -gSPI_FREQ=1e6 -gWORD_SIZE=8 -gTRANS_COUNT=2e4

      - name: Run Test 2 (CLK_FREQ=12e6, SPI_FREQ=2e6, WORD_SIZE=8)
        run: |
          cd ./sim/
          sh ./spi_master_tb_ghdl_setup.sh
          ghdl -r SPI_MASTER_TB -gCLK_FREQ=12e6 -gSPI_FREQ=2e6 -gWORD_SIZE=8 -gTRANS_COUNT=2e4

      - name: Run Test 3 (CLK_FREQ=100e6, SPI_FREQ=5e6, WORD_SIZE=16)
        run: |
          cd ./sim/
          sh ./spi_master_tb_ghdl_setup.sh
          ghdl -r SPI_MASTER_TB -gCLK_FREQ=100e6 -gSPI_FREQ=5e6 -gWORD_SIZE=16 -gTRANS_COUNT=2e4

      - name: Run Test 4 (CLK_FREQ=25e6, SPI_FREQ=3e6, WORD_SIZE=16)
        run: |
          cd ./sim/
          sh ./spi_master_tb_ghdl_setup.sh
          ghdl -r SPI_MASTER_TB -gCLK_FREQ=25e6 -gSPI_FREQ=3e6 -gWORD_SIZE=16 -gTRANS_COUNT=2e4

  spi_slave_sim:
    name: SPI Slave Simulation
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Setup GHDL
        # You may pin to the exact commit or the version.
        # uses: ghdl/setup-ghdl-ci@233774d8c0c5021af4c3686ea405013cb1494fd1
        uses: ghdl/setup-ghdl-ci@nightly
        with:
          # Select GHDL backend (mcode, llvm or gcc)
          backend: llvm # optional, default is mcode

      - name: GHDL version check
        run: ghdl --version

      - name: Run Test 1 (CLK_FREQ=50e6, SPI_FREQ=1e6, WORD_SIZE=8)
        run: |
          cd ./sim/
          sh ./spi_slave_tb_ghdl_setup.sh
          ghdl -r SPI_SLAVE_TB -gCLK_FREQ=50e6 -gSPI_FREQ=1e6 -gWORD_SIZE=8 -gTRANS_COUNT=2e4

      - name: Run Test 2 (CLK_FREQ=12e6, SPI_FREQ=2e6, WORD_SIZE=8)
        run: |
          cd ./sim/
          sh ./spi_slave_tb_ghdl_setup.sh
          ghdl -r SPI_SLAVE_TB -gCLK_FREQ=12e6 -gSPI_FREQ=2e6 -gWORD_SIZE=8 -gTRANS_COUNT=2e4

      - name: Run Test 3 (CLK_FREQ=100e6, SPI_FREQ=5e6, WORD_SIZE=16)
        run: |
          cd ./sim/
          sh ./spi_slave_tb_ghdl_setup.sh
          ghdl -r SPI_SLAVE_TB -gCLK_FREQ=100e6 -gSPI_FREQ=5e6 -gWORD_SIZE=16 -gTRANS_COUNT=2e4

      - name: Run Test 4 (CLK_FREQ=25e6, SPI_FREQ=3e6, WORD_SIZE=16)
        run: |
          cd ./sim/
          sh ./spi_slave_tb_ghdl_setup.sh
          ghdl -r SPI_SLAVE_TB -gCLK_FREQ=25e6 -gSPI_FREQ=3e6 -gWORD_SIZE=16 -gTRANS_COUNT=2e4
##############################################################################
## This file is part of 'LCLS Timing Core'.
## It is subject to the license terms in the LICENSE.txt file found in the
## top-level directory of this distribution and at:
##     https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
## No part of 'LCLS Timing Core', including this file,
## may be copied, modified, propagated, or distributed except according to
## the terms contained in the LICENSE.txt file.
##############################################################################
#schemaversion 3.0.0
#once EvrV2TriggerPulse.yaml
#include EvrV2PulsegenRegs.yaml

EvrV2TriggerPulse: &EvrV2TriggerPulse
  class: MMIODev
  description: LCLS-II Timing Pulse Generator Module
  size: 0x800
  children:
    <<: *EvrV2PulsegenRegs
# Adapted from Garnet and ButterPHY
name: inject_dont_touch

commands:
  - |
    mkdir -p outputs
    python create_dont_touch.py dont_touch.tcl
    cat inputs/design.sdc dont_touch.tcl > outputs/design.sdc

inputs:
  - design.sdc

outputs:
  - design.sdc
---
- hosts: packet-generator
  gather_facts: False
  tasks:
    - name: stop rundemo script
      command: "killall rundemo"
      ignore_errors: True
      become: True
      tags:
        - generator
    - name: stop packet generator
      command: "./stopdemo"
      args:
        chdir: "{{script_path}}/pktgen_fpga_config/"
      become: True
      tags:
        - generator
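# To run just these cleanup tasks, the playbook can be invoked with its tag
# (playbook and inventory names are hypothetical):
#
#   ansible-playbook -i inventory stop-packet-generator.yml --tags generator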
# We do not support Ubuntu < 14.04 in OpTiMSoC
# Unfortunately, travis does not support containerized builds in trusty
sudo: required
dist: trusty

# Install the build requirements
addons:
  apt:
    packages:
      - build-essential
      - tcl              # for GLIP
      - libusb-1.0-0-dev # for GLIP TCP ("logic side")
      - libboost-dev     # for the OSD software
      - libelf-dev
      - swig

language: python
python:
  # Python 3.4 is part of Ubuntu trusty, our lowest supported version
  - "3.4"

# Environment
env:
  global:
    # Installation target
    - TARGET_BASE=$HOME/optimsoc
    - PREBUILT_DEPLOY=optimsoc-prebuilt-deploy.py
    - PREBUILT_DEPLOY_URL=https://raw.githubusercontent.com/optimsoc/prebuilts/master/${PREBUILT_DEPLOY}

# Install dependencies
before_install:
  # dependency for fusesoc
  - pip install pyyaml
  # Create folder for target
  - mkdir -p ${TARGET_BASE}
  # Get prebuilts
  - curl ${PREBUILT_DEPLOY_URL} -o /tmp/${PREBUILT_DEPLOY}
  - chmod a+x /tmp/${PREBUILT_DEPLOY}
  - /tmp/${PREBUILT_DEPLOY} -d ${TARGET_BASE} all

# Execute the build and install it
install:
  - source ${TARGET_BASE}/setup_prebuilt.sh
  - ./tools/build.py --verbose --without-examples-fpga --with-examples-sim --without-docs
  - make install INSTALL_TARGET=${TARGET_BASE}/latest

# Test the build
script:
  - source ${TARGET_BASE}/latest/optimsoc-environment.sh
  - pytest -s -v test/systemtest/test_tutorial.py::TestTutorial
# .gitlab/ci/serial_io.gitlab-ci.yml
serial_io_test:
  before_script:
    - cd serial_io
  stage: test
  script:
    - make && make checks && cd chitchat && make && make checks && cd - && cd EVG_EVR && make
name: ESI PR Gates

on:
  pull_request:
    branches:
      - master
      - feature/*

jobs:
  linux_build:
    name: Build and test on Ubuntu
    runs-on: ubuntu-latest
    container:
      image: teqdruid/esi:devenv-0.5
    steps:
      - uses: actions/checkout@v2
      - name: Cosim integration tests
        run: pytest -m nolic src/cosim
      - name: MLIR toolchain build
        run: cmake -S. -Bbuild && cmake --build build
      - name: MLIR toolchain tests
        run: cmake --build build --target check-esic
      # - name: Upload ESI Core Linux artifact
      #   uses: actions/upload-artifact@v1
      #   with:
      #     name: esi_core_linux
      #     path: src/core/bin/esi_core_linux.tar.gz

  # linux_docker_run:
  #   name: Test on bare Ubuntu image
  #   needs: linux_build
  #   runs-on: ubuntu-latest
  #   container:
  #     image: ubuntu:16.04
  #   steps:
  #     - name: Install package deps
  #       run: |
  #         apt update
  #         apt install libssl1.0.0
  #     - name: Download executable
  #       uses: actions/download-artifact@v1
  #       with:
  #         name: esi_core_linux
  #         path: tarballs
  #     - run: tar -zxvf tarballs/esi_core_linux.tar.gz
  #     - uses: actions/checkout@v2
  #       with:
  #         path: src
  #     - name: Test SVCodeGen
  #       run: esi_core_linux/SVCodeGen -i src/src/core/tests/stress_tests/stress1_synth.capnp
path_cycles: 2612
done_cycles: 2613
pathIsWrong: 0
endIsCorrect: 1
startIsCorrect: 1
size: 31
name: glb_top

commands:
  - bash get_glb_top_outputs.sh

inputs:
  - design.v
  - header

outputs:
  - glb_top_tt.lib
  - glb_top_tt.db
  - glb_top.lef
  - glb_top.gds
  - glb_top.vcs.v
  - glb_top.sdf
  - glb_top.lvs.v
  - glb_top.sram.spi
  - glb_top.sram.v
  - glb_top.sram.pwr.v
  - glb_top.sram_tt.db

postconditions:
  - assert File( 'outputs/glb_top_tt.lib' )     # must exist
  - assert File( 'outputs/glb_top_tt.db' )      # must exist
  - assert File( 'outputs/glb_top.lef' )        # must exist
  - assert File( 'outputs/glb_top.gds' )        # must exist
  - assert File( 'outputs/glb_top.vcs.v' )      # must exist
  - assert File( 'outputs/glb_top.sdf' )        # must exist
  - assert File( 'outputs/glb_top.lvs.v' )      # must exist
  - assert File( 'outputs/glb_top.sram.spi' )   # must exist
  - assert File( 'outputs/glb_top.sram.v' )     # must exist
  - assert File( 'outputs/glb_top.sram_tt.db' ) # must exist