name: .NET Core 5.0
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
tests:
name: Samples and unit tests
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest, macos-latest ] # TODO windows-latest
steps:
- uses: actions/checkout@v2
- name: Setup .NET
uses: actions/setup-dotnet@v1
with:
dotnet-version: 5.0.*
- name: Setup libraries (Linux)
if: runner.os == 'Linux'
run: |
docker pull ghdl/ghdl:ubuntu20-mcode
sudo apt install libc6-dev libgdiplus
- name: Setup shell (Windows)
if: runner.os == 'Windows'
uses: msys2/setup-msys2@v2
with:
msystem: MINGW64
update: true
- name: Setup GHDL (Windows)
if: runner.os == 'Windows'
uses: ghdl/setup-ghdl-ci@nightly
with:
backend: llvm
- name: Setup libraries (MacOS)
if: runner.os == 'macOS'
run: |
mkdir -p ~/.docker/machine/cache
curl -Lo ~/.docker/machine/cache/boot2docker.iso https://github.com/boot2docker/boot2docker/releases/download/v19.03.12/boot2docker.iso
brew install docker docker-machine
docker-machine create --driver virtualbox default
docker-machine env default
eval $(docker-machine env default)
docker pull ghdl/ghdl:ubuntu20-mcode
brew install mono-libgdiplus
- name: Restore
run: dotnet restore src/SME.sln
- name: Build
run: dotnet build --no-restore src/UnitTest
- name: Test (Linux)
if: runner.os == 'Linux'
env:
SME_TEST_SKIP_VCD: 1
run: dotnet test --no-build --logger:"console;verbosity=detailed" src/UnitTest
- name: Test (Windows)
if: runner.os == 'Windows'
shell: powershell
env:
SME_TEST_SKIP_VCD: 1
SME_TEST_USE_NATIVE_GHDL: 1
run: |
& "$($env:MSYS2_PATH)MINGW64\bin\ghdl.exe" --version
dotnet test --no-build --logger:"console;verbosity=detailed" src/UnitTest
- name: Test (MacOS)
if: runner.os == 'macOS'
env:
SME_TEST_SKIP_VCD: 1
run: |
eval $(docker-machine env default)
dotnet test --no-build --logger:"console;verbosity=detailed" src/UnitTest
|
<filename>src_files.yml
ariane-sdhc:
incdirs: [
include,
]
files:
- rtl/IDDR.sv
- rtl/ODDR.sv
- rtl/sd_emmc_bistable_domain_cross.v
- rtl/sd_emmc_clock_divider.v
- rtl/sd_emmc_cmd_master.v
- rtl/sd_emmc_cmd_serial_host.v
- rtl/sd_emmc_controller_dma.v
- rtl/sd_emmc_controller_S00_AXI.v
- rtl/sd_emmc_crc_16.v
- rtl/sd_emmc_crc_7.v
- rtl/sd_emmc_data_master.v
- rtl/sd_emmc_data_serial_host.v
- rtl/sd_emmc_data_xfer_trig.v
- rtl/sd_emmc_edge_detect.v
- rtl/sd_emmc_fifo16kb.v
- rtl/sd_emmc_iddr.v
- rtl/sd_emmc_monostable_domain_cross.v
- rtl/sd_emmc_ODDR.v
- rtl/sd_emmc_controller.v
|
<reponame>slaclab/atlas-rd53-atca-dev
GitBase: ..
TopRoguePackage: atlas_rd53_atca_dev
RoguePackages:
- submodules/atlas-atca-link-agg-fw-lib/python
- submodules/atlas-rd53-fw-lib/python
- submodules/axi-pcie-core/python
- submodules/rce-gen3-fw-lib/python
- submodules/surf/python
- python
RogueConfig:
RogueScripts:
- ../software/scripts/gui.py
Targets:
###############################################
AtlasAtcaLinkAggRd53Rtm_EmuLpGbt:
ImageDir: targets/AtlasAtcaLinkAgg/AtlasAtcaLinkAggRd53Rtm_EmuLpGbt/images
Extensions:
- bit
- mcs
AtlasAtcaLinkAggRd53Rtm_Pgp4_6Gbps:
ImageDir: targets/AtlasAtcaLinkAgg/AtlasAtcaLinkAggRd53Rtm_Pgp4_6Gbps/images
Extensions:
- bit
- mcs
DpmIbertTester_1p50Gbps:
ImageDir: targets/RceDpm/DpmIbertTester_1p50Gbps/images
Extensions:
- bit
DpmIbertTester_6p25Gbps:
ImageDir: targets/RceDpm/DpmIbertTester_6p25Gbps/images
Extensions:
- bit
DpmRudpNode:
ImageDir: targets/RceDpm/DpmRudpNode/images
Extensions:
- bit
DpmPgp4_6Gbps:
ImageDir: targets/RceDpm/DpmPgp4_6Gbps/images
Extensions:
- bit
AtlasRd53FmcXilinxKcu105_EmuLpGbt:
ImageDir: targets/XilinxKcu105/AtlasRd53FmcXilinxKcu105_EmuLpGbt/images
Extensions:
- bit
- ltx
AtlasRd53FmcXilinxZcu102_EmuLpGbt:
ImageDir: targets/XilinxZcu102/AtlasRd53FmcXilinxZcu102_EmuLpGbt/images
Extensions:
- bin
AtlasRd53FmcXilinxZcu102_HybridLpGbt:
ImageDir: targets/XilinxZcu102/AtlasRd53FmcXilinxZcu102_HybridLpGbt/images
Extensions:
- bin
XilinxZcu102LpGbt:
ImageDir: targets/XilinxZcu102/XilinxZcu102LpGbt/images
Extensions:
- bin
Releases:
all:
Primary: True
Targets:
- AtlasAtcaLinkAggRd53Rtm_EmuLpGbt
- AtlasAtcaLinkAggRd53Rtm_Pgp4_6Gbps
- DpmIbertTester_1p50Gbps
- DpmIbertTester_6p25Gbps
# - DpmRudpNode
- DpmPgp4_6Gbps
- AtlasRd53FmcXilinxKcu105_EmuLpGbt
- AtlasRd53FmcXilinxZcu102_EmuLpGbt
- AtlasRd53FmcXilinxZcu102_HybridLpGbt
- XilinxZcu102LpGbt
Types:
- Rogue
|
<filename>.github/workflows/push.yml
name: 'push'
on: [ push, pull_request ]
env:
DOCKER_REGISTRY: docker.pkg.github.com
# https://github.com/tox-dev/tox/issues/1468
PY_COLORS: 1
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
python-version: 3.7
- name: install dependencies
run: |
pip install -U pip --progress-bar off
pip install -U virtualenv tox --progress-bar off
- name: run 'black'
run: tox -e py37-fmt -- --check
lin:
strategy:
fail-fast: false
max-parallel: 2
matrix:
task: [
37-lint,
36-unit,
37-unit,
]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
python-version: 3.7
- name: install dependencies
run: |
pip install -U pip --progress-bar off
pip install -U virtualenv tox --progress-bar off
- name: run job
run: |
tox -e py${{ matrix.task }} -- --color=yes
docker:
strategy:
fail-fast: false
max-parallel: 2
matrix:
task: [
{do: 38-acceptance, tag: llvm},
{do: 38-vcomponents, tag: mcode},
]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
submodules: recursive
- name: docker login
run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login -u vunit-gha --password-stdin "$DOCKER_REGISTRY"
- name: run job
run: |
docker run --rm -tv $(pwd):/src -w /src "$DOCKER_REGISTRY"/vunit/vunit/dev:${{ matrix.task.tag }} tox -e py${{ matrix.task.do }}-ghdl
- name: docker logout
run: docker logout "$DOCKER_REGISTRY"
if: always()
win:
strategy:
fail-fast: false
max-parallel: 4
matrix:
task: [
37-acceptance-ghdl,
37-vcomponents-ghdl,
37-lint,
36-unit,
37-unit,
]
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- name: git submodule update
run: git submodule update --init --recursive
if: (endsWith( matrix.task, '-lint' ) || endsWith( matrix.task, '-unit' )) == false
- uses: actions/setup-python@v1
with:
python-version: 3.7
- name: install dependencies
run: |
pip install -U pip --progress-bar off
pip install -U virtualenv tox --progress-bar off
- name: install GHDL
if: endsWith( matrix.task, '-ghdl' )
shell: bash
run: |
curl -fsSL -o ghdl.zip https://github.com/ghdl/ghdl/releases/download/v0.36/ghdl-0.36-mingw32-mcode.zip
7z x ghdl.zip "-o../ghdl" -y
mv ../ghdl/GHDL/0.36-mingw32-mcode/ ../ghdl-v0.36
rm -rf ../ghdl ghdl.zip
- name: run job
shell: bash
run: |
export PATH=$PATH:$(pwd)/../ghdl-v0.36/bin
tox -e py${{ matrix.task }} -- --color=yes
|
<gh_stars>100-1000
sudo: false
language: c++
os:
- linux
matrix:
exclude:
- os: linux
include:
- os: linux
docker: true
compiler: gcc
env: LLVM_VERSION=6.0 HWLOC_VERSION=1.11 DOCKERFILE=Ubuntu/16_04.64bit
- os: osx
compiler: clang
env: LLVM_VERSION=6.0 HWLOC_VERSION=2.0 CONDA=True
before_install:
- if [ "$TRAVIS_OS_NAME" = "osx" ] ; then
export MINICONDA_FILE="Miniconda3-latest-MacOSX-x86_64.sh";
else
export MINICONDA_FILE="Miniconda3-latest-Linux-x86_64.sh";
fi
- if [ "$CONDA" = "True" ] ; then
echo "Installing a fresh version of Miniconda.";
MINICONDA_URL="https://repo.continuum.io/miniconda";
curl -L -O "${MINICONDA_URL}/${MINICONDA_FILE}";
bash $MINICONDA_FILE -b;
source $HOME/miniconda3/bin/activate root;
conda config --add channels conda-forge;
conda install --yes --quiet llvmdev=${LLVM_VERSION}.* clangdev=${LLVM_VERSION}.* libhwloc=${HWLOC_VERSION}.*;
export LD_LIBRARY_PATH=$HOME/miniconda3/lib:$LD_LIBRARY_PATH;
fi
- if [ "$CONDA" = "True" ] ; then export MY_CMAKE_PREFIX_PATH="-DCMAKE_PREFIX_PATH=$HOME/miniconda3" ; fi
- if [ "$TRAVIS_OS_NAME" = "osx" ] ; then export MY_CMAKE_ICD_OFF="-DENABLE_ICD=OFF" ; fi
- if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$CXX" = "clang++" ] ; then MY_CMAKE_LIBCXX="-DCMAKE_CXX_FLAGS=-stdlib=libc++ -DCMAKE_EXE_LINKER_FLAGS=-Wl,-rpath,$HOME/miniconda3/lib" ; fi
- if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then
export GIT_COMMIT="$TRAVIS_COMMIT";
else
export GH_PR=$TRAVIS_PULL_REQUEST;
fi
script:
- if [ "$CONDA" = "True" ] ; then
mkdir build && cd build;
cmake .. -DCMAKE_INSTALL_PREFIX=/tmp $MY_CMAKE_PREFIX_PATH $MY_CMAKE_LIBCXX $MY_CMAKE_ICD_OFF;
make -j2 && make check && make install;
fi
- if [ ! "$DOCKERFILE" = "" ] ; then
docker build -f tools/docker/$DOCKERFILE . --build-arg GH_PR=$GH_PR --build-arg GH_SLUG=$TRAVIS_REPO_SLUG --build-arg GH_COMMIT=$GIT_COMMIT --build-arg LLVM_VERSION=$LLVM_VERSION -t travis_ci_pocl_test;
docker run `docker images -q travis_ci_pocl_test`;
fi
notifications:
email: false
|
language: python
dist: xenial
python: "3.7"
env:
- TEST: 'test.test_axi'
- TEST: 'test.test_bist'
- TEST: 'test.test_downconverter'
- TEST: 'test.test_ecc'
- TEST: 'test.test_examples'
- TEST: 'test.test_upconverter'
install:
- var1="$(pwd)"
## Get migen
- git clone https://github.com/m-labs/migen
- cd migen
- python3 setup.py develop
- cd $var1
## Get litex
- git clone https://github.com/enjoy-digital/litex
- cd litex
- python3 setup.py develop
- cd $var1
## Run common tests
- python -m unittest test.__init__
- python -m unittest test.common
script: python -m unittest $TEST
|
# Copyright 2020 ETH Zurich and University of Bologna.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# Run functional regression checks
name: ci
on: [push, pull_request]
jobs:
##################
# Verilator Lint #
##################
verilator_lint:
name: Verilator Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- name: Install requirements
run: pip install -r python-requirements.txt
- name: Install Verilator
run: |
echo 'deb http://download.opensuse.org/repositories/home:/phiwag:/edatools/xUbuntu_20.04/ /' | sudo tee /etc/apt/sources.list.d/home:phiwag:edatools.list
curl -fsSL https://download.opensuse.org/repositories/home:phiwag:edatools/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/home_phiwag_edatools.gpg > /dev/null
sudo apt update
sudo apt install verilator-4.100
- name: Run Lint
run: |
./util/verilog-lint
|
# Pad types must be one of digital, analog, or supply; pad names must be unique!
# This just shows you how you can template things with {{}}, if/else, and the following parameters:
# isInput: Boolean (each digital pad entry should be configurable between both input and output)
# isHorizontal: Boolean (each pad entry should be configurable between both horizontal and vertical)
# NOTE: Expects 1-bit in/out to be named in/out for digital; and 1-bit io for analog (supplies don't have ports)
# Expects the module name to be obtained from {{name}}, which is derived from the yaml name and tpe in the Firrtl pass
# Pipe is used for stripping margins, but indentation is required before the pipe for the yaml reader to work
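# As an illustration (not part of the original file; the parameter values here
# are hypothetical), rendering the digital template named from_tristate_foundry
# below with isInput=true and isHorizontal=false should expand, assuming
# standard Handlebars-style {{#if}} semantics, to roughly:
#   // Digital Pad Example
#   // Signal Direction: Input
#   // Pad Orientation: Vertical
#   // Call your instance PAD
#   module from_tristate_foundry(
#     input in,
#     output reg out
#   );
#     always @* begin
#       out = in;
#     end
#   endmodule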
---
tpe: analog
name: slow_foundry
width: 0
height: 0
verilog: |
|// Foundry Analog Pad Example
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance PAD
|module {{name}}(
| inout io
|);
|endmodule
---
tpe: analog
name: fast_custom
width: 0
height: 0
verilog: |
|// Custom Analog Pad Example
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance PAD
|module {{name}}(
| inout io
|);
|endmodule
---
tpe: digital
name: from_tristate_foundry
width: 0
height: 0
verilog: |
|// Digital Pad Example
|// Signal Direction: {{#if isInput}}Input{{else}}Output{{/if}}
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance PAD
|module {{name}}(
| input in,
| output reg out
|);
| // Where you would normally dump your pad instance
| always @* begin
| out = in;
| end
|endmodule
---
tpe: digital
name: fake_digital
width: 0
height: 0
verilog: |
|// (Fake/Unused) Digital Pad Example
|// Signal Direction: {{#if isInput}}Input{{else}}Output{{/if}}
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance PAD
|module {{name}}(
| input in,
| output reg out
|);
| // Where you would normally dump your pad instance
| always @* begin
| out = in;
| end
|endmodule
---
tpe: supply
name: vdd
width: 0
height: 0
supplySetNum: 1
verilog: |
|// VDD Pad Example (No IO)
|// Can group some number together as required by the foundry
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance array PAD[0:0], PAD[2:0], etc.
|module {{name}}(
|);
|endmodule
---
tpe: supply
name: vss
width: 0
height: 0
supplySetNum: 2
verilog: |
|// VSS Pad Example (No IO)
|// Can group some number together as required by the foundry
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance array PAD[0:0], PAD[2:0], etc.
|module {{name}}(
|);
|endmodule
---
tpe: supply
name: avss
width: 0
height: 0
supplySetNum: 1
verilog: |
|// Analog VSS Pad Example (No IO)
|// Can group some number together as required by the foundry
|// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}}
|// Call your instance array PAD[0:0], PAD[2:0], etc.
|module {{name}}(
|);
|endmodule
|
div_sqrt_top_mvp:
files: [
hdl/defs_div_sqrt_mvp.sv,
hdl/control_mvp.sv,
hdl/div_sqrt_mvp_wrapper.sv,
hdl/div_sqrt_top_mvp.sv,
hdl/iteration_div_sqrt_mvp.sv,
hdl/norm_div_sqrt_mvp.sv,
hdl/nrbd_nrsc_mvp.sv,
hdl/preprocess_mvp.sv,
]
|
icache_mp_128_pf:
incdirs: [
../../rtl/includes,
]
files: [
RTL/cache_controller_to_axi_128_PF.sv,
RTL/central_controller_128.sv,
RTL/icache_bank_mp_128.sv,
RTL/icache_bank_mp_PF.sv,
RTL/icache_top_mp_128_PF.sv,
RTL/merge_refill_cam_128_16.sv,
RTL/pf_miss_mux.sv,
RTL/prefetcher_if.sv,
]
|
<filename>fpga/modules/bram_accumulator/config.yml<gh_stars>10-100
---
name: bram_accumulator
board: boards/red-pitaya
cores:
- fpga/cores/comparator_v1_0
- fpga/cores/averager_counter_v1_0
- fpga/cores/delay_trig_v1_0
- fpga/cores/edge_detector_v1_0
control_registers:
- avg # set to 1 to perform averaging
- avg_period # defines the period on which waveforms are averaged
- avg_threshold # must be set to (avg_period - 5)
- n_avg_min # minimum number of averaged waveforms
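# Worked example for the avg_threshold rule above (numbers are hypothetical):
# avg_period = 1000 -> avg_threshold = 1000 - 5 = 995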
status_registers:
- n_avg # number of averaged waveforms
- avg_ready # equals 1 when the data is ready
parameters:
bram_addr_width: 8
adc_width: 14
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# Azure template for archiving pipeline step outputs and uploading them.
#
# This template will archive all of $BIN_DIR, and upload it for use by
# downstream jobs using download-artifacts-template.yml.
#
# This template expects that a variable $BUILD_ROOT is set. See
# util/build_consts.sh for more information.
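# A minimal usage sketch (the consuming job and this template's file name are
# assumed here, not taken from this repository):
#
#   jobs:
#   - job: build_sw
#     steps:
#     - template: upload-artifacts-template.yml
#       parameters:
#         includePatterns:
#           - "/sw/***"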
parameters:
# Rsync-style file patterns to include in the partial BIN_DIR output.
- name: includePatterns
type: object
default: []
steps:
- bash: |
set -e
test -n "$BUILD_ROOT"
. util/build_consts.sh
# Write all include patterns to a file used by rsync.
echo "${{ join('\n', parameters.includePatterns) }}" > "$BUILD_ROOT/include_patterns.txt"
echo
echo Files matching these patterns will be included in the binary build artifact for this job:
echo vvvvvvvvvvvvvvvvvv
cat "$BUILD_ROOT/include_patterns.txt"
echo ^^^^^^^^^^^^^^^^^^
# The file upstream_bin_dir_contents.txt lists all files which were part
# of an "upstream" BIN_DIR which got downloaded at the beginning of this
# job. Ensure that this file exists, even if no upstream BIN_DIR was
# downloaded.
touch "$BUILD_ROOT/upstream_bin_dir_contents.txt"
BIN_DIR_FULL="${BIN_DIR}.full"
mv "$BIN_DIR" "$BIN_DIR_FULL"
mkdir -p "$BIN_DIR"
echo
echo Copying files into the output archive:
rsync \
--archive \
--verbose \
--remove-source-files \
--prune-empty-dirs \
--exclude-from="$BUILD_ROOT/upstream_bin_dir_contents.txt" \
--include="*/" \
--include-from="$BUILD_ROOT/include_patterns.txt" \
--exclude="*" \
"${BIN_DIR_FULL}/" "${BIN_DIR}/"
echo
echo 'Files in $BIN_DIR not considered outputs of this job:'
echo vvvvvvvvvvvvvvvvvv
find "$BIN_DIR_FULL"
echo ^^^^^^^^^^^^^^^^^^
tar -C "$BUILD_ROOT" \
-cvf "$BUILD_ROOT/build-bin.tar" \
"${BIN_DIR#"$BUILD_ROOT/"}"
displayName: Archive step outputs
- publish: "$(Build.ArtifactStagingDirectory)/build-bin.tar"
# The PhaseName is the string after the "job" key in the build description,
# e.g. "job: my_phase_name".
artifact: partial-build-bin-$(System.PhaseName)
displayName: Upload step outputs
|
<reponame>pan185/UnarySim
# This file defines a single architecture set for tlut systolic array performance projection
- proj_16_16_bank8_block8
- proj_32_32_bank8_block8
- proj_64_64_bank8_block8
- proj_128_128_bank8_block8
|
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
# Declare the Python requirements required to build your docs
python:
version: 3.8
install:
- requirements: docs/requirements.txt
|
name: hls4ml-tutorial-0.4.0
channels:
- conda-forge
dependencies:
- python=3.7
- jupyterhub
- pydot
- graphviz
- pip
- pip:
- jupyter
- tensorflow==2.3.1
- hls4ml[profiling]==0.4.0
- git+git://github.com/google/qkeras.git#egg=qkeras
|
name: Build
on:
pull_request:
push:
branches:
- master
- develop
- release/**
tags: '[0-9]+.[0-9]+.[0-9]+'
paths-ignore:
- 'doc/**'
- 'README.md'
- 'CODE_OF_CONDUCT.md'
- 'CONTRIBUTING.md'
- 'LICENSE'
- 'SECURITY.md'
- 'data/**'
- '.github/**'
- '!.github/workflows/build.yml'
release:
types:
- created
# Keep in sync with codeql-analysis.yml
env:
CI: true
node: 14.x
java: 15
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Setup Node.js
uses: actions/setup-node@v1
with:
node-version: '${{ env.node }}'
- name: Setup Java
uses: actions/setup-java@v1
with:
java-version: '${{ env.java }}'
- uses: actions/checkout@v2
- name: Cache Gradle dependencies
uses: actions/cache@v2
with:
path: ~/.gradle/caches
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
restore-keys: |
${{ runner.os }}-gradle-
- name: Cache node modules
uses: actions/cache@v2
with:
path: node_modules
key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-node_modules-
- name: Production Build
run: ./gradlew -Pprod -Pwar clean bootWar
- name: Upload Artifact
uses: actions/upload-artifact@v2
with:
name: Artemis.war
path: build/libs/Artemis-*.war
- name: Upload Release Artifact
if: github.event_name == 'release' && github.event.action == 'created'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: build/libs/Artemis-${{ github.event.release.tag_name }}.war
asset_name: Artemis.war
asset_content_type: application/x-webarchive
server-tests:
if: github.event_name != 'release'
runs-on: ubuntu-latest
# services:
# athene-segmentation:
# image: ls1intum/athene-segmentation
# ports:
# - 8000:8000
# athene-embedding:
# image: ls1intum/athene-embedding
# ports:
# - 8001:8000
# athene-clustering:
# image: ls1intum/athene-clustering
# ports:
# - 8002:8000
steps:
- name: Setup Java
uses: actions/setup-java@v1
with:
java-version: '${{ env.java }}'
- uses: actions/checkout@v2
- name: Cache Gradle dependencies
uses: actions/cache@v2
with:
path: ~/.gradle/caches
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
restore-keys: |
${{ runner.os }}-gradle-
- name: Java Tests
run: ./gradlew --console=plain executeTests jacocoTestReport -x yarn -x webpack jacocoTestCoverageVerification
- name: "Codacy: Report coverage"
uses: codacy/codacy-coverage-reporter-action@master
with:
project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
coverage-reports: build/reports/jacoco/test/jacocoTestReport.xml
if: (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name) && (success() || failure())
- name: Java Code Style
run: ./gradlew spotlessCheck
if: success() || failure()
- name: Java Documentation
run: ./gradlew checkstyleMain -x yarn -x webpack
if: success() || failure()
client-tests:
if: github.event_name != 'release'
runs-on: ubuntu-latest
steps:
- name: Setup Node.js
uses: actions/setup-node@v1
with:
node-version: '${{ env.node }}'
- uses: actions/checkout@v2
- name: Cache node modules
uses: actions/cache@v2
with:
path: node_modules
key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-node_modules-
- name: Install Dependencies
run: yarn install
- name: TypeScript Tests
run: yarn test:coverage --ci
- name: "Codacy: Report coverage"
uses: codacy/codacy-coverage-reporter-action@master
with:
project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
coverage-reports: coverage/lcov.info
if: (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name) && (success() || failure())
- name: TypeScript Formatting
run: yarn prettier:check
if: success() || failure()
- name: TypeScript Code Style
run: yarn lint
if: success() || failure()
docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: ammaraskar/sphinx-action@master
with:
docs-folder: "docs/"
- uses: actions/upload-artifact@v1
with:
name: Documentation
path: docs/_build/html/
|
## Uses the deployment flow described here: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-deploy-to-external-repository-external_repository
# The cucapra/calyx-docs repository contains the public deployment key, and the main Calyx repository contains the private key, enabling it to write to the docs repository.
name: Docs website
on:
push:
branches:
- master
jobs:
playground:
name: Docs website
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup mdBook
uses: peaceiris/actions-mdbook@v1
with:
mdbook-version: 'latest'
- name: mdbook
run: mdbook build
- name: Install Rust stable
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.0
override: true
components: rustfmt, clippy
- name: Build source documentation
uses: actions-rs/cargo@v1
with:
command: doc
args: -p calyx --no-deps
- name: Move source documentation
run: |
mv ./target/doc ./book/source
- name: Create CNAME files
run: |
echo 'docs.calyxir.org' > ./book/CNAME
- name: deploy
uses: peaceiris/actions-gh-pages@v3
with:
publish_branch: main
deploy_key: ${{ secrets.DOCS_DEPLOY_TOKEN }}
external_repository: cucapra/calyx-docs
publish_dir: ./book
|
<gh_stars>10-100
derived_clks:
tb_i:
abspath: 'tb_i'
emu_clk: 'emu_clk'
emu_rst: 'emu_rst'
emu_dt: 'emu_dt'
dt_req: 'dt_req'
gated_clk_req: 'clk_val'
gated_clk: 'clk_i'
|
<reponame>sergev/vak-opensource
execution:
- concurrency: 10
hold-for: 5m
ramp-up: 2m
scenario: yaml_example
scenarios:
yaml_example:
retrieve-resources: false
requests:
- http://example.com/
reporting:
- module: final-stats
- module: console
settings:
check-interval: 5s
default-executor: jmeter
provisioning: local
|
sudo: false
language: c
os:
- linux
- osx
compiler:
- gcc
- clang
env:
global:
- BUILD_LIBPCAP=true
# encrypted COVERITY_SCAN_TOKEN from
# https://scan.coverity.com/projects/<project_id>/submit_build?tab=travis_ci
- secure: "<KEY>"
# Coverity run condition (avoid matrix multiple runs), need customized
# build script. Need an update if new matrix cases.
- coverity_scan_run_condition='"$TRAVIS_OS_NAME" = linux -a "$CC" = gcc -a "$REMOTE" = enable -a "$CMAKE" = no'
# Coverity script test mode (if true no uploading, avoid reaching the quota)
# usual processing: false.
- coverity_scan_script_test_mode=false
matrix:
# NOTE: REMOTE= is for the libpcap build, which is done with autotools
# even if we're building tcpdump with CMake.
- REMOTE=disable CMAKE=no
- REMOTE=disable CMAKE=yes
- REMOTE=enable CMAKE=no
- REMOTE=enable CMAKE=yes
matrix:
fast_finish: true
addons:
coverity_scan:
# customized build script URL
# TRAVIS_REPO_SLUG: owner_name/repo_name of repository currently being built
# TRAVIS_BRANCH: name of the branch currently being built
build_script_url: https://raw.githubusercontent.com/$TRAVIS_REPO_SLUG/$TRAVIS_BRANCH/.travis-coverity-scan-build.sh
# project metadata
project:
name: $TRAVIS_REPO_SLUG
# Where email notification of build analysis results will be sent
#notification_email: <EMAIL>
# Commands to prepare for build_command
build_command_prepend: ./configure
# This command will be added as an argument to "cov-build" to compile
# the project for analysis
build_command: make
# Pattern to match selecting branches that will run analysis
branch_pattern: coverity_scan
apt:
packages:
- libusb-1.0-0-dev
- libdbus-glib-1-dev
- libbluetooth-dev
- libnl-genl-3-dev
- libibverbs-dev
- libssl-dev
- libssl0.9.8
- libssl1.0.0
- libdnet-dev
- libsmi2-dev
- libcap-ng-dev
- libpcap-dev
git:
quiet: true
depth: 3
before_install:
- uname -a
- date
- if [ "$TRAVIS_OS_NAME" = osx ]; then brew update >/dev/null; fi
install:
- if [ "$TRAVIS_OS_NAME" = osx ]; then brew install libsmi | grep -v '%'; fi
before_script:
- if [ "$BUILD_LIBPCAP" = true ]; then (cd .. && echo '$ git clone [...] libpcap.git' && git clone --depth=50 --branch=master --quiet git://github.com/the-tcpdump-group/libpcap.git && cd libpcap && ./configure "--${REMOTE}-remote" --prefix=/tmp && make && make install); fi
script:
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = no ]; then touch .devel configure; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = no ]; then echo '$ ./configure [...]' && echo -n travis_fold:start:script.configure; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = no ]; then ./configure CPPFLAGS="-I/usr/local/Cellar/openssl/1.0.2l/include/" --prefix=/tmp; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = yes ]; then mkdir build; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = yes ]; then cd build; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = yes ]; then cmake -DCMAKE_PREFIX_PATH=/tmp -DCMAKE_INSTALL_PREFIX=/tmp $ENABLE_REMOTE ..; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then echo -n travis_fold:end:script.configure; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then make -s; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then echo '$ make install [...]' && echo -n travis_fold:start:script.make_install; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then PATH=$PATH make install; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then echo -n travis_fold:end:script.make_install; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = no ]; then make check; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then ./tcpdump -D; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then ./tcpdump -J; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then ./tcpdump --version; fi
- if [ "$COVERITY_SCAN_BRANCH" != 1 -a "$CMAKE" = no ]; then make releasetar; fi
|
<gh_stars>10-100
package:
name: timer_unit
sources:
# Level 0
- rtl/timer_unit_counter.sv
- rtl/timer_unit_counter_presc.sv
# Level 1
- rtl/apb_timer_unit.sv
- rtl/timer_unit.sv
|
# Adapted from Garnet and ButterPHY
name: submodule
commands:
- cd outputs
- cp ../../../build_prbs/18-cadence-innovus-signoff/results/prbs_generator_syn-merged.gds ./prbs_generator_syn.gds
- cp ../../../build_prbs/21-synopsys-ptpx-genlibdb/outputs/design.db ./prbs_generator_syn.db
- cp ../../../build_prbs/21-synopsys-ptpx-genlibdb/outputs/design.lib ./prbs_generator_syn.lib
- cp ../../../build_prbs/18-cadence-innovus-signoff/results/prbs_generator_syn.lef ./prbs_generator_syn.lef
- cp ../../../build_prbs/20-open-magic-def2spice/outputs/design_extracted.spice ./prbs_generator_syn.spice
- cp ../../../build_prbs/18-cadence-innovus-signoff/results/prbs_generator_syn.lvs.v ./prbs_generator_syn.lvs.v
- cp ../../../build_16t4/18-cadence-innovus-signoff/results/hr_16t4_mux_top-merged.gds ./hr_16t4_mux_top.gds
- cp ../../../build_16t4/21-synopsys-ptpx-genlibdb/outputs/design.db ./hr_16t4_mux_top.db
- cp ../../../build_16t4/21-synopsys-ptpx-genlibdb/outputs/design.lib ./hr_16t4_mux_top.lib
- cp ../../../build_16t4/18-cadence-innovus-signoff/results/hr_16t4_mux_top.lef ./hr_16t4_mux_top.lef
- cp ../../../build_16t4/20-open-magic-def2spice/outputs/design_extracted.spice ./hr_16t4_mux_top.spice
- cp ../../../build_16t4/18-cadence-innovus-signoff/results/hr_16t4_mux_top.lvs.v ./hr_16t4_mux_top.lvs.v
- cp ../../../build_4t1/19-cadence-innovus-signoff/results/qr_4t1_mux_top-merged.gds ./qr_4t1_mux_top.gds
- cp ../../../build_4t1/22-synopsys-ptpx-genlibdb/outputs/design.db ./qr_4t1_mux_top.db
- cp ../../../build_4t1/22-synopsys-ptpx-genlibdb/outputs/design.lib ./qr_4t1_mux_top.lib
- cp ../../../build_4t1/19-cadence-innovus-signoff/results/qr_4t1_mux_top.lef ./qr_4t1_mux_top.lef
- cp ../../../build_4t1/21-open-magic-def2spice/outputs/design_extracted.spice ./qr_4t1_mux_top.spice
- cp ../../../build_4t1/19-cadence-innovus-signoff/results/qr_4t1_mux_top.lvs.v ./qr_4t1_mux_top.lvs.v
- cp ../../../mdll/17-cadence-innovus-signoff/results/osc_core-merged.gds ./osc_core.gds
- cp ../../../mdll/17-cadence-innovus-signoff/results/osc_core.lef ./osc_core.lef
- cp ../../../mdll/20-synopsys-ptpx-genlibdb/osc_core.db ./osc_core.db
- cp ../../../mdll/20-synopsys-ptpx-genlibdb/osc_core.lib ./osc_core.lib
- cp ../../../mdll/19-open-magic-def2spice/outputs/design_extracted.spice ./osc_core.spice
- cp ../../../mdll/17-cadence-innovus-signoff/results/osc_core.lvs.v ./osc_core.lvs.v
- cp ../../../fftl/19-cadence-innovus-signoff/results/fine_freq_track-merged.gds ./fine_freq_track.gds
- cp ../../../fftl/19-cadence-innovus-signoff/results/fine_freq_track.lef ./fine_freq_track.lef
- cp ../../../fftl/19-cadence-innovus-signoff/results/fine_freq_track.lvs.v ./fine_freq_track.lvs.v
- cp ../../../fftl/22-synopsys-ptpx-genlibdb/fine_freq_track.db ./fine_freq_track.db
- cp ../../../fftl/22-synopsys-ptpx-genlibdb/fine_freq_track.lib ./fine_freq_track.lib
- cp ../../../fftl/21-open-magic-def2spice/outputs/design_extracted.spice ./fine_freq_track.spice
inputs:
outputs:
- prbs_generator_syn.db
- prbs_generator_syn.lib
- prbs_generator_syn.lef
- prbs_generator_syn.spice
- prbs_generator_syn.gds
- prbs_generator_syn.lvs.v
- hr_16t4_mux_top.db
- hr_16t4_mux_top.lib
- hr_16t4_mux_top.lef
- hr_16t4_mux_top.spice
- hr_16t4_mux_top.gds
- hr_16t4_mux_top.lvs.v
- qr_4t1_mux_top.db
- qr_4t1_mux_top.lib
- qr_4t1_mux_top.lef
- qr_4t1_mux_top.spice
- qr_4t1_mux_top.gds
- qr_4t1_mux_top.lvs.v
- fine_freq_track.db
- fine_freq_track.lib
- fine_freq_track.lef
- fine_freq_track.spice
- fine_freq_track.gds
- fine_freq_track.lvs.v
- osc_core.db
- osc_core.lib
- osc_core.lef
- osc_core.spice
- osc_core.gds
- osc_core.lvs.v
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
description: ssd traffic detection on private dataset.
input size: 360*480
float ops: 11.6G
task: detection
framework: caffe
prune: '0.9'
version: 2.0
files:
- name: cf_ssdtraffic_360_480_11.6G_2.0
type: float & quantized
board: GPU
download link: https://www.xilinx.com/bin/public/openDownload?filename=cf_ssdtraffic_360_480_0.9_11.6G_2.0.zip
checksum: 8a68b7efbfef6e5d2fb9dc32970c65a6
- name: ssd_traffic_pruned_0_9
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=ssd_traffic_pruned_0_9-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 7af73db624c46afed475a90bca4281dd
- name: ssd_traffic_pruned_0_9
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=ssd_traffic_pruned_0_9-vck190-r2.0.0.tar.gz
checksum: dace0ac5a2c092cc23ba1c6a0699ff1e
- name: ssd_traffic_pruned_0_9
type: xmodel
board: vck50006pe-DPUCVDX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=ssd_traffic_pruned_0_9-vck50006pe-DPUCVDX8H-DWC-r2.0.0.tar.gz
checksum: 9e9eaefc0bd9877ebcb90cc9db5abc99
- name: ssd_traffic_pruned_0_9
type: xmodel
board: vck50008pe-DPUCVDX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=ssd_traffic_pruned_0_9-vck50008pe-DPUCVDX8H-r2.0.0.tar.gz
checksum: c86e007321836613d396662b2c501194
- name: ssd_traffic_pruned_0_9
type: xmodel
board: u50lv-DPUCAHX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=ssd_traffic_pruned_0_9-u50lv-DPUCAHX8H-r2.0.0.tar.gz
checksum: 42c846cedd9d0d9aaf0c48f2371c844f
- name: ssd_traffic_pruned_0_9
type: xmodel
board: u50lv-DPUCAHX8H-DWC & u55c-DPUCAHX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=ssd_traffic_pruned_0_9-u55c-u50lv-DPUCAHX8H-DWC-r2.0.0.tar.gz
checksum: e9b2ab1ad0e9c09d1c1c5e14e50b5d5e
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE
|
# Solderpad Hardware License, Version 0.51, see LICENSE for details.
# SPDX-License-Identifier: SHL-0.51
package:
name: pspin
authors:
- <NAME> <<EMAIL>>
dependencies:
axi: {path: deps/pulp_platform_axi}
common_cells: {path: deps/pulp_platform_common_cells}
tech_cells_generic: {path: deps/pulp_platform_tech_cells_generic}
axi_riscv_atomics: {path: deps/pulp_platform_axi_riscv_atomics}
snitch_cluster: {path: deps/pulp_platform_snitch_cluster/hw/ip/snitch_cluster}
future: {path: deps/pulp_platform_future}
sources:
# Level 0:
- src/pspin_cfg_pkg.sv
# Level 1:
- src/snitch_cluster_cfg_pkg.sv
- src/memories/sram.sv
- src/pkt_scheduler/fifo_engine.sv
- src/pkt_scheduler/cluster_rb.sv
- src/soc_dma/soc_dma.sv
- src/cmds/host_direct.sv
- src/cmds/cmd_xbar.sv
- src/cmds/cluster_cmd.sv
- src/hpu_driver/task_frontend.sv
- src/hpu_driver/cmd_frontend.sv
# Level 2:
- src/interconnects/cluster_demux.sv
- src/interconnects/dma_mux.sv
- src/interconnects/host_mst_mux.sv
- src/interconnects/l2_xbar.sv
- src/interconnects/nhi_xbar.sv
- src/interconnects/pe_xbar.sv
- src/interconnects/service_xbar.sv
- src/memories/l2_mem.sv
- src/memories/prog_mem.sv
- src/pkt_scheduler/cluster_scheduler.sv
- src/hpu_driver/hpu_driver.sv
- src/pkt_scheduler/mpq_engine.sv
- src/pkt_scheduler/scheduler.sv
- src/soc_dma/soc_dma_wrap.sv
# Level 3:
- src/pspin.sv
- target: simulation
files:
# Level 0
- modelsim_model/src/clk_rst_gen.sv
- modelsim_model/src/pspin_tb_cfg_pkg.sv
- modelsim_model/src/cmd_sink.sv
# Level 1
- modelsim_model/src/packet_generator.sv
# Level 3
- modelsim_model/src/pspin_tb.sv
- target: verilator
files:
# Level 0
- verilator_model/tb/pspin_verilator.sv
|
configMapGenerator:
- name: challenge-skeleton-config
files:
- pow
- nsjail.cfg
generatorOptions:
disableNameSuffixHash: true
labels:
type: generated
annotations:
note: generated
|
<reponame>milad621/livehd<gh_stars>100-1000
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# A scrape configuration containing exactly one endpoint to scrape:
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
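# For example, a sample scraped from this job would carry that label (metric
# name and value are hypothetical):
#   some_metric{job="livehd_exporter", instance="localhost:8000"} 42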
- job_name: livehd_exporter
scrape_interval: 2m
scrape_timeout: 2m
metrics_path: "/metrics"
static_configs:
- targets: ['localhost:8000']
# Alertmanager settings
rule_files:
- './alert.rules.yml'
alerting:
alertmanagers:
- static_configs:
- targets:
- 'localhost:9093'
|
<filename>scripts/prometheus/alert.rules.yml
groups:
- name: Execution-time(task_THREAD_POOL_pool1_thread_pool_test)
rules:
- alert: 'Execution time spikes'
expr: (task_THREAD_POOL_pool1_thread_pool_test_secs) > 0.4
labels:
severity: 'critical'
annotations:
title: "Execution Time"
description: 'Execution time threshold of pool_test set to 0.4 sec.'
summary: "Execution time is `{{humanize $value}}sec`"
host: "{{$labels.instance}}"
- name: IPC(task_THREAD_POOL_pool1_thread_pool_test)
rules:
- alert: 'IPC dropped'
expr: (task_THREAD_POOL_pool1_thread_pool_test_IPC) < 0.5
labels:
severity: 'critical'
annotations:
title: "IPC"
description: 'IPC threshold set to 0.5.'
summary: "IPC is `{{humanize $value}}`"
host: "{{$labels.instance}}"
- name: BR_MPKI(task_THREAD_POOL_pool1_thread_pool_test)
rules:
- alert: 'BR_MPKI spikes'
expr: (task_THREAD_POOL_pool1_thread_pool_test_BR_MPKI) > 1.0
labels:
severity: 'critical'
annotations:
title: "BR_MPKI"
description: 'BR_MPKI threshold set to 1.0.'
summary: "BR_MPKI is `{{humanize $value}}`"
host: "{{$labels.instance}}"
- name: L2_MPKI(task_THREAD_POOL_pool1_thread_pool_test)
rules:
- alert: 'L2_MPKI spikes'
expr: (task_THREAD_POOL_pool1_thread_pool_test_L2_MPKI) > 25.0
labels:
severity: 'critical'
annotations:
title: "L2_MPKI"
description: 'L2_MPKI threshold set to 25.0.'
summary: "L2_MPKI is `{{humanize $value}}`"
host: "{{$labels.instance}}"
|
<gh_stars>1-10
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
description: Single Person Pose Estimation Model
input size: 224*128
float ops: 548.6M
task: pose estimation
framework: caffe
prune: 'no'
version: 2.0
files:
- name: cf_SPnet_aichallenger_224_128_0.54G_2.0
type: float & quantized
board: GPU
download link: https://www.xilinx.com/bin/public/openDownload?filename=cf_SPnet_aichallenger_224_128_0.54G_2.0.zip
checksum: 0135a1a61423332b84255842958c0908
- name: sp_net
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 25f8e3d75485bb5a65fabef74edf4ba9
- name: sp_net
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-vck190-r2.0.0.tar.gz
checksum: f4441233e340b727ba21068d3a7edcde
- name: sp_net
type: xmodel
board: vck50006pe-DPUCVDX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-vck50006pe-DPUCVDX8H-DWC-r2.0.0.tar.gz
checksum: 56092bc17e057499a57aa62dd3b4a68f
- name: sp_net
type: xmodel
board: vck50008pe-DPUCVDX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-vck50008pe-DPUCVDX8H-r2.0.0.tar.gz
checksum: 0ca78e24367e7f463b315572f03fe7a4
- name: sp_net
type: xmodel
board: u50lv-DPUCAHX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-u50lv-DPUCAHX8H-r2.0.0.tar.gz
checksum: 35a0041772568a1dc7ea8bc5979ab443
- name: sp_net
type: xmodel
board: u50lv-DPUCAHX8H-DWC & u55c-DPUCAHX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-u55c-u50lv-DPUCAHX8H-DWC-r2.0.0.tar.gz
checksum: 78fc542c26f498c20330a0a0c38474d5
- name: sp_net
type: xmodel
board: u200-DPUCADF8H & u250-DPUCADF8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=sp_net-u200-u250-r2.0.0.tar.gz
checksum: 4b1cf2e009fa80cca46413fed9863422
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE
|
<gh_stars>10-100
---
name: laser-controller
board: boards/red-pitaya
version: 0.1.1
cores:
- fpga/cores/redp_adc_v1_0
- fpga/cores/redp_dac_v1_0
- fpga/cores/axi_ctl_register_v1_0
- fpga/cores/axi_sts_register_v1_0
- fpga/cores/dna_reader_v1_0
- fpga/cores/pdm_v1_0
- fpga/cores/pulse_generator_v1_0
- fpga/cores/comparator_v1_0
- fpga/cores/bus_multiplexer_v1_0
- fpga/cores/saturation_v1_0
- fpga/cores/at93c46d_spi_v1_0
memory:
- name: control
offset: '0x60000000'
range: 4K
- name: status
offset: '0x50000000'
range: 4K
protection: read
- name: xadc
offset: '0x43C00000'
range: 64K
control_registers:
- led
- laser_current
- laser_control
- power_setpoint
- dac[n_dac]
- eeprom_ctl
status_registers:
- adc[n_adc]
- pid_control
- eeprom_sts
parameters:
fclk0: 200000000
dac_width: 14
adc_width: 14
pwm_width: 12
n_pwm: 4
n_dac: 2
n_adc: 2
xdc:
- boards/red-pitaya/config/ports.xdc
- boards/red-pitaya/config/clocks.xdc
- ./expansion_connector.xdc
drivers:
- server/drivers/common.hpp
- server/drivers/xadc.hpp
- server/drivers/laser.hpp
- server/drivers/eeprom.hpp
web:
- web/koheron.ts
- web/laser.ts
- web/laser-control.html
- ./app.ts
- ./index.html
- web/main.css
|
derived_clks:
tb_emu_io:
abspath: 'tb_i'
emu_clk: 'emu_clk'
emu_rst: 'emu_rst'
iacore_clk_adc:
abspath: 'tb_i.top_i.iacore'
emu_clk: 'emu_clk'
emu_rst: 'emu_rst'
gated_clk_req: 'clk_adc_val'
gated_clk: 'clk_adc_i'
|
package: github.com/hitchnsmile/go-hangman
import:
- package: github.com/nsf/termbox-go
- package: github.com/mattn/go-sqlite3
version: ^1.2.0
- package: github.com/mattn/go-runewidth
version: ^0.0.2
|
<filename>models/AI-Model-Zoo/model-list/cf_inceptionv4_imagenet_299_299_24.5G_2.0/model.yaml<gh_stars>1-10
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
description: inception-v4 classifier on ImageNet.
input size: 299*299
float ops: 24.5G
task: classification
framework: caffe
prune: 'no'
version: 2.0
files:
- name: cf_inceptionv4_imagenet_299_299_24.5G_2.0
type: float & quantized
board: GPU
download link: https://www.xilinx.com/bin/public/openDownload?filename=cf_inceptionv4_imagenet_299_299_24.5G_2.0.zip
checksum: bf23be4f31422b7c3109c63056d45f87
- name: inception_v4
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=inception_v4-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: e7088f5d7723d5d1d6d5177cbf5d945d
- name: inception_v4
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=inception_v4-vck190-r2.0.0.tar.gz
checksum: 2aa4fbe7260f38b9c66acf1c77b0632a
- name: inception_v4
type: xmodel
board: vck50006pe-DPUCVDX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=inception_v4-vck50006pe-DPUCVDX8H-DWC-r2.0.0.tar.gz
checksum: 08b674dd11f8b82bc44d08d0e17bf769e
- name: inception_v4
type: xmodel
board: u50lv-DPUCAHX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=inception_v4-u50lv-DPUCAHX8H-r2.0.0.tar.gz
checksum: b6a9f23b25d893bf3d201a9269239fd6
- name: inception_v4
type: xmodel
board: u50lv-DPUCAHX8H-DWC & u55c-DPUCAHX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=inception_v4-u55c-u50lv-DPUCAHX8H-DWC-r2.0.0.tar.gz
checksum: 9a463619dc55835f1f28babd0665975d
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE
|
cryoAsicGen1:
enable: True
ForceWrite: False
dataWriter:
enable: True
dataFile: ''
open: False
bufferSize: 0
maxFileSize: 0
XilinxKcu1500Pgp3:
enable: True
AxiPcieCore:
enable: True
AxiPciePhy:
enable: True
AxiVersion:
enable: True
ScratchPad: 0x000000
FpgaReloadHalt: 0x0
FpgaReloadAddress: 0x0
UserReset: 0x0
DmaIbAxisMon:
enable: True
DmaObAxisMon:
enable: True
Lane[0]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[1]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[2]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[3]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[4]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[5]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[6]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
Lane[7]:
enable: True
AutoStatus: False
Loopback: 0x0
SkipInterval: 65520
FlowControlDisable: False
TxDisable: False
EpixHRGen1Cryo:
enable: True
AxiVersion:
enable: True
ScratchPad: 0x000000
FpgaReloadHalt: 0x0
FpgaReloadAddress: 0x0
UserReset: 0x0
Clock Jitter Cleaner:
enable: True
RstL: True
Dec: False
Inc: False
Frqtbl: False
FrqtblZ: True
Rate: 0x0
RateZ: 0x0
BwSel: 0x0
BwSelZ: 0x2
FreqSel: 0x8
FreqSelZ: 0x0
Sfout: 0x2
SfoutZ: 0x1
DeserRegisters:
enable: True
StreamsEn_n: 0x0
Resync: False
14bData_ser0:
enable: True
14bData_ser1:
enable: True
PacketRegisters:
enable: True
decDataBitOrder: True
StreamDataMode: False
StopDataTx: False
ResetCounters: False
asicDataReq: 2048
runControl:
enable: True
runState: Stopped
runRate: 1 Hz
|
<filename>ultra96/ROOT_FS/app/fad/data/Handler/param.yaml
%YAML 1.2
---
Basis:
terminate_distance: 0.40 # finish once the vehicle comes within this distance of the goal point
optor_dev_filename: "/dev/ttyUSB0"
# Startup delay (s)
StartupDelay:
wo_calculating: 0.001
io_calculating: 0.001
vo_calculating: 0.001
localizationing: 0.001
planning: 0.001
tracing: 0.001
debagging: 0.001
# Execution period (s)
ExecutePeriod:
wo_calculating: 0.008
io_calculating: 0.020
vo_calculating: 0.030
localizationing: 0.010
planning: 0.100
tracing: 0.010
debagging: 1.500
Debug:
img_side_length: 2.25
interval_imshow_debug_map: 1
imwrite_mode: 0
|
language: minimal
services: docker
script: docker run --rm -tv $(pwd):/src -w /src ghdl/vunit:llvm python3 /src/run.py
|
resolver: nightly-2018-11-24
packages:
- .
allow-newer: true
extra-deps:
- git: <EMAIL>:clash-lang/clash-compiler.git
commit: 67d36495cc108afad767e92d1ee21e12411cc9e2
extra-dep: false
subdirs:
- clash-ghc
- clash-lib
- clash-prelude
compiler-check: newer-minor
|
<gh_stars>1-10
language: vhdl
os: linux
before_install:
- sudo apt-get update
- sudo apt-get install -y gnat zlib1g-dev
- git clone https://github.com/ghdl/ghdl
install:
- cd ghdl
- git pull origin master
- ./configure --prefix=/usr/local
- make
- sudo make install
- sudo ldconfig
- cd ..
script:
- make test
after_script:
- make clean
|
<reponame>micprog/pulp
overrides:
tech_cells_generic: { git: "<EMAIL>:pulp-platform/tech_cells_generic.git", version: 0.2.2 }
axi: { git: "<EMAIL>:pulp-platform/axi.git", version: 0.7.2 } # really old (now 0.24?)
common_verification: { git: "<EMAIL>:pulp-platform/common_verification.git", version: 0.2.0 }
udma_core: { git: "<EMAIL>:pulp-platform/udma_core.git", rev: "c36a405f33275576700746b0d90eab0ce278e600" }
|
<gh_stars>1-10
KerasJson: example-keras-model-files/KERAS_1layer.json
KerasH5: example-keras-model-files/KERAS_1layer_weights.h5
OutputDir: my-hls-test
ProjectName: myproject
XilinxPart: xc7vx690tffg1927-2
ClockPeriod: 5
IOType: io_parallel # options: io_serial/io_parallel
ReuseFactor: 1
DefaultPrecision: ap_fixed<18,8>
|
<reponame>sil2100/Vitis-AI<filename>models/AI-Model-Zoo/model-list/tf_vgg16_imagenet_224_224_30.96G_2.0/model.yaml
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
description: vgg16 classifier on ImageNet.
input size: 224*224
float ops: 30.96G
task: classification
framework: tensorflow
prune: 'no'
version: 2.0
files:
- name: tf_vgg16_imagenet_224_224_30.96G_2.0
type: float & quantized
board: GPU
download link: https://www.xilinx.com/bin/public/openDownload?filename=tf_vgg16_imagenet_224_224_30.96G_2.0.zip
checksum: 94ea16d5c3b0e00e20c9c4b37fbeb540
- name: vgg_16_tf
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=vgg_16_tf-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 3a23699349f29a2c31c2a85e1309b048
- name: vgg_16_tf
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=vgg_16_tf-vck190-r2.0.0.tar.gz
checksum: e8cca853bb0fe61c7590f8b9d56793da
- name: vgg_16_tf
type: xmodel
board: vck50006pe-DPUCVDX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=vgg_16_tf-vck50006pe-DPUCVDX8H-DWC-r2.0.0.tar.gz
checksum: a463dbaec58018479494592b124da6dc
- name: vgg_16_tf
type: xmodel
board: u50lv-DPUCAHX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=vgg_16_tf-u50lv-DPUCAHX8H-r2.0.0.tar.gz
checksum: d175f6c313bac431d9b55cad3419773b
- name: vgg_16_tf
type: xmodel
board: u50lv-DPUCAHX8H-DWC & u55c-DPUCAHX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=vgg_16_tf-u55c-u50lv-DPUCAHX8H-DWC-r2.0.0.tar.gz
checksum: 1514cd5bfc6b74c89f79278e507cf2d2
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE
|
<filename>Bender.yml
package:
name: Pulpissimo
authors:
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
dependencies:
common_cells: { git: "https://github.com/pulp-platform/common_cells.git", version: 1.21.0 }
jtag_pulp: { git: "https://github.com/pulp-platform/jtag_pulp.git", version: 0.1.0 }
pulp_soc: { git: "https://github.com/pulp-platform/pulp_soc.git", version: 4.0.0 }
tbtools: { git: "https://github.com/pulp-platform/tbtools.git", version: 0.2.1 }
tech_cells_generic: { git: "https://github.com/pulp-platform/tech_cells_generic.git", version: 0.2.3 }
workspace:
# package_links:
# ips/tbtools: tbtools # symlinks for backwards compatibility with IPApprox
# checkout_dir: deps
export_include_dirs:
- rtl/includes
sources:
# Source files grouped in levels. Files in level 0 have no dependencies on files in this
# package. Files in level 1 only depend on files in level 0, files in level 2 on files in
# levels 1 and 0, etc. Files within a level are ordered alphabetically.
# Level 0
- rtl/pulpissimo/jtag_tap_top.sv
- rtl/pulpissimo/pad_frame.sv
- rtl/pulpissimo/pad_control.sv
- rtl/pulpissimo/soc_domain.sv
- rtl/pulpissimo/rtc_date.sv
- rtl/pulpissimo/rtc_clock.sv
- rtl/pulpissimo/safe_domain_reg_if.sv
# Level 1
- rtl/pulpissimo/safe_domain.sv
- rtl/pulpissimo/pulpissimo.sv
# TB sources
- target: any(test,simulation)
files:
- rtl/tb/riscv_pkg.sv
- rtl/tb/jtag_pkg.sv
- rtl/tb/pulp_tap_pkg.sv
- rtl/tb/srec/srec_pkg.sv
- rtl/tb/tb_clk_gen.sv
- rtl/tb/tb_pulp.sv
- rtl/tb/SimJTAG.sv
- rtl/tb/SimDTM.sv
# Open models
- target: any(test,simulation)
files:
- rtl/vip/spi_master_padframe.sv
- rtl/vip/uart_sim.sv
- rtl/vip/camera/cam_vip.sv
# S25FS256_model (SPI Flash)
- target: all(any(test,simulation), flash_vip)
defines:
SPEEDSIM: ~
files:
- rtl/vip/spi_flash/S25fs256s/model/s25fs256s.v
# 24FC1025 model (I2C flash)
- target: all(any(test,simulation), i2c_vip)
defines:
SPEEDSIM: ~
files:
- rtl/vip/i2c_eeprom/24FC1025.v
# i2s model
- target: all(any(test,simulation), i2s_vip)
defines:
SPEEDSIM: ~
files:
- rtl/vip/i2s/i2c_if.v
- rtl/vip/i2s/i2s_vip_channel.sv
- rtl/vip/i2s/i2s_vip.sv
|
# Copyright 2020-2021 ETH Zurich and University of Bologna.
# Solderpad Hardware License, Version 0.51, see LICENSE for details.
# SPDX-License-Identifier: SHL-0.51
package:
name: riscv-dbg
authors:
- <NAME> <<EMAIL>>
- <NAME> <<EMAIL>>
dependencies:
tech_cells_generic: { git: "https://github.com/pulp-platform/tech_cells_generic.git", version: 0.2.3 }
common_cells: {git: https://github.com/pulp-platform/common_cells.git, version: 1.21.0}
sources:
files:
# Level 1:
- src/dm_pkg.sv
- debug_rom/debug_rom.sv
- debug_rom/debug_rom_one_scratch.sv
# Level 2:
- src/dm_csrs.sv
- src/dm_mem.sv
- src/dmi_cdc.sv
- target: not(all(xilinx, bscane))
files:
- src/dmi_jtag_tap.sv
- target: all(xilinx, bscane)
files:
- src/dmi_bscane_tap.sv
# Level 3:
- src/dm_sba.sv
- src/dm_top.sv
- src/dmi_jtag.sv
# Level 4:
- src/dm_obi_top.sv
- target: simulation
files:
- src/dmi_test.sv
- target: test
files:
# Level 1
- src/dmi_intf.sv
- tb/jtag_dmi/jtag_intf.sv
- tb/jtag_dmi/jtag_test.sv
# Level 3
- tb/jtag_dmi/tb_jtag_dmi.sv
|
<gh_stars>0
package:
name: mkdv
version: 0.0.1
dev-deps:
- name: colorama
src: pypi
- name: jinja2
src: pypi
- name: pyyaml
src: pypi
- name: allure-python-commons
src: pypi
- name: markdown
src: pypi
- name: jsonschema
src: pypi
- name: toposort
src: pypi
- name: fusesoc
url: https://github.com/olofk/fusesoc.git
- name: Sphinx
src: pypi
- name: sphinx-rtd-theme
src: pypi
- name: sphinx-jsonschema
src: pypi
- name: cairosvg
src: pypi
- name: breathe
src: pypi
- name: funcparserlib
src: pypi
version: 1.0.0a0
# - name: mistune
# src: pypi
# version: <2.0.0
- name: sphinxcontrib-makedomain
src: pypi
- name: sphinxcontrib-openapi
src: pypi
- name: sphinxcontrib-spelling
src: pypi
# - name: sphinxcontrib-seqdiag
# src: pypi
# version: <3.0.0
- name: sphinx-issues
src: pypi
- name: sphinx-argparse
src: pypi
- name: pyenchant
src: pypi
|
<filename>ci/vivado-builds.yml
build:kc705_basex__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:kc705_basex__2020.2
build:kc705_basex__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:kc705_basex__2019.2
build:kc705_gmii__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:kc705_gmii__2020.2
build:kc705_gmii__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:kc705_gmii__2019.2
build:kcu105_basex__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:kcu105_basex__2020.2
build:kcu105_basex__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:kcu105_basex__2019.2
build:zcu102_basex__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:zcu102_basex__2020.2
build:zcu102_basex__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:zcu102_basex__2019.2
# build:zcu102_c2c_loopback__2019.2:
# extends: .template_vivado_build
# variables:
# VIVADO_VERSION: "2019.2"
# dependencies:
# - quick_check:zcu102_c2c_loopback__2019.2
build:k800__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:k800__2020.2
build:k800__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:k800__2019.2
build:vcu118_pcie__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:vcu118_pcie__2020.2
build:vcu118_pcie__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:vcu118_pcie__2019.2
build:vcu118_sgmii__2020.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2020.2"
dependencies:
- quick_check:vcu118_sgmii__2020.2
build:vcu118_sgmii__2019.2:
extends: .template_vivado_build
variables:
VIVADO_VERSION: "2019.2"
dependencies:
- quick_check:vcu118_sgmii__2019.2
|
<gh_stars>0
filespec:
- vlnv: tblink-rpc-gw::tblink-rpc-cmdproc_1_1-synth
out:
- name: MKDV_VL_SRCS
type:
- verilogSource
- systemVerilogSource
flags: sv
- name: MKDV_VL_INCDIRS
type:
- verilogSource
- systemVerilogSource
flags: sv
include: True
|
defaults:
- do_blink/figures
- override hydra/job_logging: colorlog
- override hydra/hydra_logging: colorlog
|
<reponame>tblink-bfms/tblink-bfms-i2c
package:
name: tblink-bfms-rv
version: 0.0.1
setup-deps:
- tblink-bfms
deps:
- name: tblink-bfms
url: https://github.com/tblink-bfms/tblink-bfms.git
- name: pytblink-rpc
url: https://github.com/tblink-rpc/pytblink-rpc.git
- name: tblink-rpc-cocotb
url: https://github.com/tblink-rpc/tblink-rpc-cocotb.git
dev-deps:
- name: tblink-bfms
url: https://github.com/tblink-bfms/tblink-bfms.git
- name: pytblink-rpc
url: https://github.com/tblink-rpc/pytblink-rpc.git
- name: tblink-rpc-cocotb
url: https://github.com/tblink-rpc/tblink-rpc-cocotb.git
- name: tblink-rpc-hdl
url: https://github.com/tblink-rpc/tblink-rpc-hdl.git
- name: tblink-rpc-gw
url: https://github.com/tblink-rpc/tblink-rpc-gw.git
- name: fwprotocol-defs
url: https://github.com/featherweight-ip/fwprotocol-defs.git
- name: cocotb
src: pypi
- name: mkdv
url: https://github.com/fvutils/mkdv.git
- name: uvm
url: https://www.accellera.org/images/downloads/standards/uvm/uvm-1.2.tar.gz
- name: vlsim
src: pypi
|
on:
- push
name: macOS CI
jobs:
build:
name: Build (macOS)
runs-on: macos-10.15
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Get LLVM and Bison
run: |
brew install llvm bison
echo "LLVM_CONFIG=$(brew --prefix llvm)/bin/llvm-config" >> $GITHUB_ENV
echo "BISON=$(brew --prefix bison)/bin/bison" >> $GITHUB_ENV
echo "NPROC=$(sysctl -n hw.ncpu)" >> $GITHUB_ENV
- name: Make
run: |
${{ env.LLVM_CONFIG }} --link-static # Is this imperative?
make -j${{ env.NPROC }} release
|
<reponame>cromulencellc/hackasat-final-2021<filename>flatsat/docker-compose.yml
version: "3.6"
services:
mosquitto:
image: eclipse-mosquitto:2-openssl
ports:
- 1883:1883
networks:
default:
ipv4_address: ${MQTT_IP}
user: ${DOCKER_USER_UID}:${DOCKER_USER_GID}
volumes:
- ./mosquitto/mosquitto/config:/mosquitto/config
- ./mosquitto/mosquitto/data:/mosquitto/data
logging:
options:
max-size: "25M"
management-center:
image: cedalo/management-center:2
ports:
- 8088:8088
depends_on:
- mosquitto
networks:
default:
ipv4_address: ${MQTT_GUI_IP}
environment:
CEDALO_MC_BROKER_ID: mosquitto-2.0
CEDALO_MC_BROKER_NAME: Mosquitto 2.0
CEDALO_MC_BROKER_URL: mqtt://mosquitto:1883
CEDALO_MC_BROKER_USERNAME: cedalo
CEDALO_MC_BROKER_PASSWORD: <PASSWORD>
CEDALO_MC_USERNAME: cedalo
CEDALO_MC_PASSWORD: "<PASSWORD>"
logging:
options:
max-size: "25M"
fortytwo:
image: has2/finals/42/42:latest
environment:
- TEAM_NUMBER=${TEAM_NUMBER}
- MQTT_IP=${MQTT_IP}
- MQTT_REPUBLISH_INTERVAL=5000
depends_on:
- mosquitto
volumes:
- ./42/HAS2:/home/has/42/HAS2
networks:
default:
ipv4_address: ${FORTYTWO_IP}
logging:
options:
max-size: "25M"
fortytwo-bridge:
image: has2/finals/fortytwo-mqtt-bridge/fortytwo-bridge:latest
environment:
- MQTT_HOST=${MQTT_IP}
- GAME_START_STATE=STOPPED
- PAYLOAD_FOV=10
- LOG_LEVEL=INFO
depends_on:
- mosquitto
networks:
- default
logging:
options:
max-size: "25M"
cosmos:
image: has2/finals/opensatkit-docker/cosmos:latest
environment:
- DISPLAY=$DISPLAY
- QT_X11_NO_MITSHM=1
- HOST_IP=${HOST_IP}
- PDB_IP=${PDB_IP}
- FLATSAT_TO_IP=${CDH_IP}
- CHALLENGER_TEAM=${CHALLENGER_TEAM}
- REPORTAPI_IP=${REPORTAPI_GATEWAY}
- BEELINK_IP=${BEELINK_IP}
- TELEMETRY_DB_URL=${TELEMETRY_DB_URL}
- MQTT_IP=${MQTT_IP}
volumes:
- ./opensatkit/cosmos:/cosmos
user: "${DOCKER_USER_UID}:${DOCKER_USER_GID}"
network_mode: "host"
networks:
default:
external: true
name: ${DOCKER_NETWORK}
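# Note: 'default' is declared external, so it must exist before `docker-compose up`.
# An illustrative (assumed) setup: `docker network create --subnet 192.168.100.0/24 $DOCKER_NETWORK`,
# with MQTT_IP, MQTT_GUI_IP and FORTYTWO_IP chosen inside that subnet via the .env file.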
|
<reponame>SubjeBilisim/anasymod<gh_stars>10-100
verilog_sources:
osc_checker:
files: "osc_checker.sv"
fileset: "sim"
sim_ctrl:
files: "sim_ctrl.sv"
fileset: "sim"
defines:
DT_MSDSL:
name: DT_MSDSL
value: 0.1e-6
SIMULATION_MODE_MSDSL:
name: SIMULATION_MODE_MSDSL
fileset: "sim"
|
<reponame>aappl/reg-mapper
# Create list of register maps
register_maps :
# First map
map1 :
width : 32
base_address : 0x0
# Declare registers in map
registers :
# First register
register1 :
description : "The first register."
RW : "READ_ONLY"
# Declare bits in register
bit_maps :
# First bit
bit1 :
description : "The first bit."
start_bit : 0
width : 3
|
name: Playground Deployment
on:
push:
branches:
- master
jobs:
playground:
name: Web Demo
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
cache: yarn
cache-dependency-path: web/yarn.lock
- name: Build
uses: actions-rs/cargo@v1
with:
command: install
args: wasm-pack wasm-bindgen-cli
- run: yarn
working-directory: ./web
- run: yarn build
working-directory: ./web
- name: deploy
uses: peaceiris/actions-gh-pages@v3
with:
publish_branch: playground
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./web/dist
|
language: python
matrix:
include:
- env: CONFIG="Trusty,Python3.6,libgnat-4.8,grc-1.11"
os: linux
sudo: required
dist: trusty
python: "3.6"
before_install:
- sudo apt-get -qq update
- sudo apt-get install -y libgnat-4.8
- sudo ./tools/Travis-CI/grc.setup.sh
- env: CONFIG="Precise,Python3.6,gnat-5"
os: linux
python: "3.6"
before_install:
- sudo apt-get -qq update
- sudo apt-get install -y libgnat-4.8
- wget https://korpus.sk/~garabik/software/grc/grc_1.11.3-1_all.deb
- sudo dpkg -i grc_1.11.3-1_all.deb
# addons:
# apt:
# packages:
# - gnat-5
allow_failures:
- env: CONFIG="Precise,Python3.6,gnat-5"
install:
- pip3 install -r tools/Travis-CI/requirements.txt
before_script:
- ./tools/Travis-CI/ghdl.setup.sh
- ./tools/Travis-CI/poc.setup.sh
script:
- ./tools/Travis-CI/poc.dryrun.sh
- ./tools/Travis-CI/poc.run.sh "PoC.*"
|
#
# List of RTL sources. Unlike IPs, these reside in
# the current Git repository.
# Uses the YAML syntax.
# 'domain' refers to the two domains (soc, cluster) used for FPGA
# emulator synthesis
#
components:
domain: [soc,cluster]
pulp_soc:
domain: [soc]
|
<filename>.github/workflows/fw-ci.yml
name: fw-ci
on:
schedule:
- cron: '0 0 * * *'
pull_request:
branches: [develop]
jobs:
fw-release:
runs-on: ubuntu-18.04
steps:
- name: Git Checkout
uses: actions/checkout@v2
- name: Build lattice_digitizer
run: |
. /setup-diamond.sh
cmake -GNinja -Bbuild
cmake --build build --target lattice_digitizer
- name: Upload lattice_digitizer Artifacts
uses: actions/upload-artifact@v2
with:
name: lattice_digitizer
path: |
build/fw/src/lattice_digitizer
build/fw/src/lattice_digitizer/.vdbs
container:
image: nupole/diamond:latest
options: --mac-address B4:6D:83:5A:AD:A2
|
.template_base:
image: ${IPBUS_DOCKER_REGISTRY}/ipbus-fw-dev-cc7:2019-09-17__ipbb0.5.2
before_script:
- source /software/Xilinx/Vivado/${VIVADO_VERSION}/settings64.sh
.template_vivado_quick_check:
extends: .template_base
tags:
- docker
- xilinx-tools
stage: quick_checks
script:
- ipbb init work_area
- cd work_area
- ln -s ${CI_PROJECT_DIR} src/ipbus-firmware
- ipbb proj create vivado -t top_${PROJ}.dep ${CI_JOB_NAME#quick_check:} ipbus-firmware:projects/example
- cd proj/${CI_JOB_NAME#quick_check:}
- ipbb vivado make-project
- ipbb vivado check-syntax
artifacts:
when: on_success
paths:
- work_area
expire_in: 1 day
.template_vivado_build:
extends: .template_base
tags:
- docker
- xilinx-tools
stage: builds
only:
- /^pull-requests.[0-9]+$/
- master
- tags
- web
script:
- cd work_area/proj/${CI_JOB_NAME#build:}
- ipbb vivado synth -j4
- ipbb vivado impl
- ipbb vivado bitfile
- ipbb vivado package
after_script:
- export PROJ_NAME=${CI_JOB_NAME#build:}
- export ARTIFACT_DIR=build_results/${PROJ_NAME%__${VIVADO_VERSION}}/vivado${VIVADO_VERSION}/
- mkdir -p ${ARTIFACT_DIR}/logs
- cp -v work_area/proj/${PROJ_NAME}/package/*.tgz ${ARTIFACT_DIR}/ 2>/dev/null || true
- cp -v work_area/proj/${PROJ_NAME}/*.log ${ARTIFACT_DIR}/logs 2>/dev/null || true
- cp -v work_area/proj/${PROJ_NAME}/*.jou ${ARTIFACT_DIR}/logs 2>/dev/null || true
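    # A note on the bash expansions used above (quick illustration; job name assumed):
    #   CI_JOB_NAME='build:kc705_basex__2020.2'
    #   ${CI_JOB_NAME#build:}            -> 'kc705_basex__2020.2'   (strip shortest matching prefix)
    #   ${PROJ_NAME%__${VIVADO_VERSION}} -> 'kc705_basex'           (strip shortest matching suffix)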
artifacts:
when: always
name: "${CI_JOB_NAME#build:}-on-${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA:0:8}"
paths:
- build_results
expire_in: 2 weeks
|
---
name: mars_star_tracker
board: boards/mars_zx3
version: 0.1.1
cores:
- fpga/cores/axi_ctl_register_v1_0
- fpga/cores/axi_sts_register_v1_0
- fpga/cores/dna_reader_v1_0
memory:
- name: control
offset: '0x60000000'
range: 4K
- name: status
offset: '0x50000000'
range: 4K
control_registers:
- counter_load[2]
- counter_max[2]
- cmdcontrol[2]
- cmdduration[2]
- trackctrl[2]
- cmdtick[2]
- backlash_tick[2]
- backlash_duration[2]
- led
- led_pwm
- camera_trigger
status_registers:
- step_count[2]
- status[2]
- forty_two
parameters:
fclk0: 50000000 # FPGA clock speed in Hz
xdc:
- ./../../../boards/mars_zx3/config/ports.xdc
- ./constraints.xdc
drivers:
- server/drivers/common.hpp
- ./drv8825.hpp
- ./sky-tracker.hpp
- ./ascom_interface.hpp
- ./camera.hpp
web:
- ./web/index.html
- web/koheron.ts
- web/led-blinker.ts
- ./web/app.ts
- web/main.css
|
name: 'coverage'
on:
push:
schedule:
- cron: '0 0 * * 5'
jobs:
coverage:
runs-on: ubuntu-latest
env:
DOCKER_REGISTRY: docker.pkg.github.com
IMAGE: docker.pkg.github.com/vunit/vunit/dev:llvm
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Docker login
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "$GITHUB_TOKEN" | docker login -u vunit-gha --password-stdin "$DOCKER_REGISTRY"
docker pull $IMAGE
docker logout "$DOCKER_REGISTRY"
- name: Run coverage
run: |
./.github/run.sh tox -e coverage
./.github/run.sh coverage html --directory=htmlcov
- name: Report coverage
run: ./.github/run.sh coverage report -m --skip-covered
- uses: actions/upload-artifact@v2
with:
name: VUnit_coverage
path: htmlcov
|
device: XC7A35T-1CSG324
filetype: MIXED
optimize: Speed
top: test_top
src: ./vhdl_src
vhdl_2008: True
|
<filename>.github/workflows/ci.yml<gh_stars>1-10
name: CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
Verible:
# The only runner that matches Verible binary target
runs-on: ubuntu-16.04
steps:
- uses: actions/checkout@v2
with: # needed by git-auto-commit
ref: ${{ github.head_ref }}
- name: Install Verible
run: |
wget --quiet https://github.com/google/verible/releases/download/v0.0-601-gfe94fa5/verible-v0.0-601-gfe94fa5-Ubuntu-16.04-xenial-x86_64.tar.gz
tar -xf *.tar.gz
cp verible*/bin/* .
rm -r verible-v0.0-601-gfe94fa5*
echo Now you can use Verible.
- name: Check Verilog syntax
run: find . -type f -name '*.v' -exec ./verible-verilog-syntax {} +
- name: Format Verilog
run: find . -type f -name '*.v' -exec ./verible-verilog-format --inplace {} +
- uses: stefanzweifel/git-auto-commit-action@v4
with:
commit_user_name: GitHub Actions Verible Bot
commit_message: Format Verilog code
Verilator:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with: # needed by git-auto-commit
ref: ${{ github.head_ref }}
- name: Install Verilator
run: |
sudo apt update
sudo apt install verilator
- name: Lint
run: |
cd napalm.srcs/sources_1/new
verilator --lint-only -Wall cpu.v |& tee log || true
- name: Get Comment Body
id: get-comment-body
run: |
cd napalm.srcs/sources_1/new
printf 'Output from Verilator:\n\n<pre>' > comment
sed 's/napalm.srcs\/sources_1\/new\///g' log >> comment
echo '</pre>' >> comment
body=$(cat comment)
body="${body//'%'/'%25'}"
body="${body//$'\n'/'%0A'}"
body="${body//$'\r'/'%0D'}"
echo ::set-output name=body::$body
- name: Create commit comment
uses: peter-evans/commit-comment@v1
with:
body: ${{ steps.get-comment-body.outputs.body }}
- name: Fail if error occurs
run: |
cd napalm.srcs/sources_1/new
! grep '%Error' log
|
<filename>.gitlab/ci/soc.gitlab-ci.yml
soc_picorv32_test:
before_script:
- cd soc/picorv32/test
stage: test
script:
- make
soc_picorv32_kc705:
stage: synthesis
before_script:
- cd soc/picorv32/project/kc705/synth
script:
- XILINXD_LICENSE_FILE=$XILINXD_LICENSE_FILE XILINX_VIVADO=/non-free/Xilinx/Vivado/2018.3 PATH=$XILINX_VIVADO/bin:$PATH make system_top.bit
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
expire_in: 1 week
paths:
- soc/picorv32/project/kc705/synth/system_top.bit
soc_picorv32_cmod_a7:
stage: synthesis
before_script:
- cd soc/picorv32/project/cmod_a7/synth
script:
- PATH=$XILINX_VIVADO/bin:$PATH make system_top.bit
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
expire_in: 1 week
paths:
- soc/picorv32/project/cmod_a7/synth/system_top.bit
soc_picorv32_cmod_a7_run:
stage: program
dependencies:
- soc_picorv32_cmod_a7
before_script:
- cd soc/picorv32/project/cmod_a7
script:
- xc3sprog -c jtaghs1_fast -s 210328A6DA47 synth/system_top.bit && python3 hw_test.py 210328A6DA47
|
<gh_stars>0
name: Main
on:
push:
branches:
- '*'
tags-ignore:
- v*
pull_request:
release:
types:
- created
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os:
- ubuntu-20.04
- macOS-10.15
- windows-2019
steps:
- uses: actions/checkout@v1
- uses: mstksg/setup-stack@v2
- name: Build
run: make
- name: Prepare Artifact
shell: bash
run: cp LICENSE NOTICE README.md bin
- name: Upload Artifact
uses: actions/upload-artifact@v1
with:
name: ${{ runner.os }}
path: bin
test:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os:
- ubuntu-20.04
- macOS-10.15
needs: build
steps:
- uses: actions/checkout@v1
- name: Install Dependencies (macOS)
if: runner.os == 'macOS'
run: brew install shunit2 icarus-verilog
- name: Install Dependencies (Linux)
if: runner.os == 'Linux'
run: sudo apt-get install -y shunit2 iverilog
- name: Download Artifact
uses: actions/download-artifact@v1
with:
name: ${{ runner.os }}
path: bin
- name: Test
run: |
chmod +x bin/sv2v
export PATH="$PATH:$HOME/.local/bin"
make test
release:
runs-on: ubuntu-20.04
strategy:
matrix:
name: [macOS, Linux, Windows]
needs: build
if: github.event_name == 'release'
steps:
- name: Download Artifact
uses: actions/download-artifact@v1
with:
name: ${{ matrix.name }}
path: sv2v-${{ matrix.name }}
- name: Mark Binary Executable
run: chmod +x */sv2v*
- name: Create ZIP
run: zip -r sv2v-${{ matrix.name }} ./sv2v-${{ matrix.name }}
- name: Upload Release Asset
        uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./sv2v-${{ matrix.name }}.zip
asset_name: sv2v-${{ matrix.name }}.zip
asset_content_type: application/zip
|
<filename>.github/workflows/hardware_design_default.yml
name: Hardware Design Default
on:
push:
paths:
- 'hardware_design/**'
- '!hardware_design/README.md'
- '.github/workflows/hardware_design_default.yml'
defaults:
run:
working-directory: hardware_design
shell: bash
jobs:
build:
runs-on: ubuntu-latest
container: ghdl/vunit:gcc
steps:
- uses: actions/checkout@v2
- name: Run the Unit Tests
run: python3 run.py
|
---
# Constants
NLIST_MAX: 65536
NPROBE_MAX: 128 # stage 3 priority queue size
D: 128
M: 16
K: 256
TOPK: 1
QUERY_NUM: 10000
LARGE_NUM: 99999999 # used to init the heap
# stage 1
OPQ_ENABLE: True
OPQ_UNROLL_FACTOR: 4 # 4 or 8; the larger, the better the performance. Only used when OPQ_ENABLE=True; leave as None if OPQ_ENABLE=False
# stage 2
# all PEs except the last handle more cells per query; the last handles fewer
# e.g., nlist = 8192, PE num = 15:
# each of the first 14 PEs handles 547 cells (8192 / 15, rounded up),
# while the last handles 534: 14 * 547 + 534 = 8192
STAGE2_ON_CHIP: False
# preferably STAGE2_OFF_CHIP_START_CHANNEL = 25 - ceil(PE_NUM_CENTER_DIST_COMP / 2)
STAGE2_OFF_CHIP_START_CHANNEL: 19 # Only used when STAGE2_ON_CHIP = False
PE_NUM_CENTER_DIST_COMP: 12
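# The uneven split above can be sanity-checked quickly (illustrative Python):
#   import math
#   nlist, pes = 8192, 15
#   per_pe = math.ceil(nlist / pes)          # 547 on each of the first 14 PEs
#   last = nlist - (pes - 1) * per_pe        # 534 on the last PE
#   assert (pes - 1) * per_pe + last == nlist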
# stage 3
STAGE_3_PRIORITY_QUEUE_LEVEL: 2 # support 1 or 2
STAGE_3_PRIORITY_QUEUE_L1_NUM: 2 # only used when STAGE_3_PRIORITY_QUEUE_LEVEL=2
# stage 4
# all PEs except the last construct more tables per query; the last constructs fewer
# e.g., nprobe = 17, PE num = 6: each of the first 5 PEs constructs 3 tables,
# while the last constructs 2: 5 * 3 + 2 = 17
PE_NUM_TABLE_CONSTRUCTION: 12
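# The same ceil-based split applies to table construction (illustrative Python):
#   import math
#   nprobe, pes = 17, 6
#   per_pe = math.ceil(nprobe / pes)         # 3 tables on each of the first 5 PEs
#   last = nprobe - (pes - 1) * per_pe       # 2 tables on the last PE
#   assert (pes - 1) * per_pe + last == nprobe   # 5 * 3 + 2 == 17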
# stage 5
# (HBM_CHANNEL_NUM * 3 / STAGE5_COMP_PE_NUM) must be an integer
# e.g., default 1 HBM channel -> 3 PQ code streams -> STAGE5_COMP_PE_NUM = 3 * HBM_CHANNEL_NUM
# e.g., merge content of 1 HBM channel to 1 PQ code stream -> STAGE5_COMP_PE_NUM = HBM_CHANNEL_NUM
# e.g., merge content of 2 HBM channels to 1 PQ code stream -> STAGE5_COMP_PE_NUM = HBM_CHANNEL_NUM / 2
HBM_CHANNEL_NUM: 16 # PQ code stream num = 3 * HBM_CHANNEL_NUM
STAGE5_COMP_PE_NUM: 16
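# Sanity check of the divisibility rule above (illustrative Python):
#   assert (HBM_CHANNEL_NUM * 3) % STAGE5_COMP_PE_NUM == 0    # here: (16 * 3) % 16 == 0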
# stage 6
# a sorting network can sit before the priority queue group (SORT_GROUP_ENABLE)
# if not, set SORT_GROUP_ENABLE to False, and SORT_GROUP_NUM to 0 or None
# SORT_GROUP_NUM = number of groups of 16 outputs per cycle, e.g., HBM channel num = 10,
# comp PE num = 30, then SORT_GROUP_NUM = 2; if HBM channel num = 12, PE num = 36, then SORT_GROUP_NUM = 3
SORT_GROUP_ENABLE: False
SORT_GROUP_NUM: 0 # only used when SORT_GROUP_ENABLE=True
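# A plausible closed form for the examples above (illustrative Python, an assumption):
#   SORT_GROUP_NUM = math.ceil(STAGE5_COMP_PE_NUM / 16)   # 30 -> 2, 36 -> 3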
STAGE_6_PRIORITY_QUEUE_LEVEL: 2 # supported level num: 2 or 3
# only fill STAGE_6_PRIORITY_QUEUE_L2_NUM, STAGE_6_STREAM_PER_L2_QUEUE_LARGER, STAGE_6_STREAM_PER_L2_QUEUE_SMALLER
# when STAGE_6_PRIORITY_QUEUE_LEVEL = 3; otherwise leave them blank
# Must satisfy: L1 stream num = STAGE5_COMP_PE_NUM * 2 (without sort-reduction unit)
#   or = 16 * 2 (with sort-reduction unit)
#   = (STAGE_6_PRIORITY_QUEUE_L2_NUM - 1) * STAGE_6_STREAM_PER_L2_QUEUE_LARGER + STAGE_6_STREAM_PER_L2_QUEUE_SMALLER
# STAGE_6_PRIORITY_QUEUE_L2_NUM: 2 # only used when STAGE_6_PRIORITY_QUEUE_LEVEL = 3
# STAGE_6_STREAM_PER_L2_QUEUE_LARGER: 6 # only used when STAGE_6_PRIORITY_QUEUE_LEVEL = 3
# STAGE_6_STREAM_PER_L2_QUEUE_SMALLER: 4 # only used when STAGE_6_PRIORITY_QUEUE_LEVEL = 3
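# Quick symbolic check of that constraint (illustrative Python; names mirror the keys above):
#   l1_streams = STAGE5_COMP_PE_NUM * 2      # or 16 * 2 with the sort-reduction unit
#   assert (L2_NUM - 1) * STREAM_PER_L2_LARGER + STREAM_PER_L2_SMALLER == l1_streams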
# Dataset config
DB_SCALE: "100M" # 1M to 1000M
# Data directory (don't add "/" after the dir)
# e.g., dir=/home/wejiang/saved_npy_data/FPGA_data_SIFT100M_OPQ16,IVF8192,PQ16_HBM_10_banks,
# then the content for HBM0 is:
# /home/wejiang/saved_npy_data/FPGA_data_SIFT100M_OPQ16,IVF8192,PQ16_HBM_10_banks/HBM_bank_0_raw
# DATA_DIR: "/mnt/scratch/wenqi/saved_npy_data/FPGA_data_SIFT100M_IVF4096,PQ16_23_banks"
# GT_DIR: "/mnt/scratch/wenqi/saved_npy_data/gnd/"
# FPGA Settings
DEVICE: U280 # Supported devices: U280, U250, U50
FREQ: 140
|
<reponame>Kamran-10xe/fwrisc
variables:
build_num: $(Build.BuildNumber)
jobs:
- job: FWRISC
pool:
vmImage: 'ubuntu-18.04'
steps:
- bash: echo "##vso[task.prependpath]$CONDA/bin"
displayName: Add conda to PATH
- script: |
sudo apt-get install zlib1g-dev
echo "Which Conda: `which conda`"
conda create --yes --name BuildEnv
source activate BuildEnv
displayName: 'Setup Conda'
- script: |
source activate BuildEnv
conda install -y -c litex-hub --name BuildEnv iverilog verilator gcc-riscv32-elf-newlib dtc
# We need to ensure that 'gcc' and 'g++' point to the Conda versions
source activate BuildEnv
which_conda=`which conda`
conda_bindir=`dirname $which_conda`
echo "#!/bin/sh" > $conda_bindir/gcc
echo "exec \$CC \$*" >> $conda_bindir/gcc
chmod +x $conda_bindir/gcc
echo "#!/bin/sh" > $conda_bindir/g++
echo "exec \$CXX \$*" >> $conda_bindir/g++
chmod +x $conda_bindir/g++
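      # Each wrapper ends up as a two-line sh script, e.g. (illustrative):
      #   #!/bin/sh
      #   exec $CC $*
      # so tools that invoke 'gcc'/'g++' by name still pick up Conda's compilers.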
displayName: 'Setup Test Tools'
- script: |
source activate BuildEnv
./scripts/ivpm.py update
displayName: 'Fetch Packages'
- script: |
source activate BuildEnv
source etc/fwrisc_env.sh
which_conda=`which conda`
conda_bindir=`dirname $which_conda`
conda_dir=`dirname $conda_bindir`
export LD_LIBRARY_PATH=$conda_dir/lib:$LD_LIBRARY_PATH
cd ve/fwrisc_rv32i/sim
echo "** Running Icarus Verilog Test"
runtest.pl -j1 -tl testlists/fwrisc_riscv_azure_tests.tl -sim ivl
displayName: 'RV32I (Icarus)'
- script: |
source activate BuildEnv
source etc/fwrisc_env.sh
which_conda=`which conda`
conda_bindir=`dirname $which_conda`
conda_dir=`dirname $conda_bindir`
export LD_LIBRARY_PATH=$conda_dir/lib:$LD_LIBRARY_PATH
cd ve/fwrisc_rv32i/sim
echo "** Running Verilator Test"
runtest.pl -j1 -tl testlists/fwrisc_riscv_azure_tests.tl -sim vlsim +tool.vlsim.tracevcd
displayName: 'RV32I (Verilator)'
- task: PublishTestResults@2
inputs:
testResultsFormat: 'JUnit'
testResultsFiles: '**/result.xml'
|
---
algorithm:
class: Nsga2
population_size: 200
probabilities:
crossover: 0.5
mutation: 0.3
injection: 0.5
shorten_individual: true
init:
method: ramped # grow or full or ramped
sensible_depth: 7
inject:
method: grow # grow or full or random
sensible_depth: 4
termination:
max_steps: 1000
on_individual: stopping_condition
grammar:
class: Abnf::File
filename: sample/toy_regression/grammar.abnf
mapper:
class: BreadthFirst
track_support_on: true
crossover:
class: CrossoverLHS
mutation:
class: MutationStructural
store:
class: Store
filename: ./toy_nsga2_lhsc.store
report:
class: ToyReport
require: sample/toy_regression/toy_report.rb
individual:
class: ToyIndividualMOStrict
require: sample/toy_regression/toy_individual.rb
shorten_chromozome: true
|
<reponame>BearerPipelineTest/google-ctf<gh_stars>1000+
name: <NAME>
# Long form description.
description: |+
Sometimes, what's more important is not what you have, but what you're missing.
# The flag
flag: CTF{!-!OLE-E-COM!7RESSION}
# Task category. (one of hw, crypto, pwn, rev, web, net, misc)
category: misc
# === the fields below will be filled by SRE or automation ===
# Task label
label: ''
# URL for web challenges
link: ''
# host/port for non-web challenges
host: ''
# the URL for attachments, to be filled in by automation
attachment: ''
# is this challenge released? Will be set by SREs
visible: false
|
apiVersion: kctf.dev/v1
kind: Challenge
metadata:
name: fullchain
spec:
deployed: true
powDifficultySeconds: 60
network:
public: true
healthcheck:
enabled: true
podTemplate:
template:
spec:
containers:
- name: challenge
volumeMounts:
- name: kvm-volume
mountPath: /dev/kvm
securityContext:
privileged: true
volumes:
- name: kvm-volume
hostPath:
path: /dev/kvm
nodeSelector:
dedicated: kvm
tolerations:
- key: "dedicated"
operator: "Equal"
value: "kvm"
effect: "NoExecute"
|
<reponame>slaclab/kpix<filename>software/cfg/Test1Ch.yml
DesyTrackerRoot:
enable: 'True'
ForceWrite: 'False'
# DataWriter:
# enable: 'True'
# dataFile: ''
# open: 'False'
# bufferSize: '0'
# maxFileSize: '0'
DesyTracker:
enable: 'True'
AxiVersion:
enable: 'True'
ScratchPad: '0xDEADBEEF'
KpixDaqCore:
enable: 'True'
SysConfig:
enable: 'True'
RawDataMode: 'False'
AutoReadDisable: 'False'
KpixClockGen:
enable: 'True'
ClkSelReadout: '0x20'
ClkSelDigitize: '0x20'
ClkSelAcquire: '0x3f'
ClkSelIdle: '0x020'
ClkSelPrecharge: '0x4af'
AcquisitionControl:
enable: 'True'
ExtTrigSrc: Disabled
ExtTimestampSrc: Disabled
ExtAcquisitionSrc: EthAcquire
ExtStartSrc: EthStart
Calibrate: 'False'
KpixAsicArray:
enable: 'True'
KpixAsic[*]:
enable: 'False'
CfgAutoReadDisable: 'False'
CfgForceTemp: 'False'
CfgDisableTemp: 'False'
CfgAutoStatusReadEn: 'True'
TimeResetOn: 11
TimeResetOff: 900
TimeOffsetNullOff: 1000
TimeLeakageNullOff: 1
TimeDeselDelay: 18
TimeBunchClkDelay: 3000
TimeDigitizeDelay: 0x1
TimePowerUpOn: 11
TimePowerUpDigOn: 11
TimeThreshOff: 2600
TrigInhibitOff: 1000
BunchClockCount: 7000
Cal0Delay: 750
Cal1Delay: 50
Cal2Delay: 50
Cal3Delay: 50
CalCount: 1
DacRampThresh: 240
DacRangeThreshold: 0
DacCalibration: 245
DacEventThreshold: 80
DacShaperBias: 120
DacDefaultAnalog: 189
DacThresholdA: 200
DacThresholdB: 0
CntrlDisPerReset: 'True'
CntrlEnDcReset: 'True'
CntrlHighGain: 'True'
CntrlNearNeighbor: 'False'
CntrlCalSource: 'Disable'
CntrlForceTrigSource: 'Disable'
CntrlHoldTime: 64x
CntrlCalibHigh: 'False'
CntrlShortIntEn: 'False'
CntrlForceLowGain: 'False'
CntrlLeakNullDisable: 'True'
CntrlPolarity: Positive
CntrlTrigDisable: 'False'
CntrlDisPwrCycle: 'False'
CntrlFeCurr: 31uA
CntrlDiffTime: Half
CntrlMonSource: None
Chan_0_31: ADDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_32_63: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_64_95: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_96_127: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_128_159: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_160_191: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_192_223: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_224_255: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_256_287: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_288_319: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_320_351: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_352_383: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_384_415: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_416_447: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_448_479: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_480_511: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_512_543: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_544_575: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_576_607: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_608_639: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_640_671: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_672_703: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_704_735: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_736_767: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_768_799: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_800_831: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_832_863: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_864_895: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_896_927: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_928_959: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_960_991: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
Chan_992_1023: DDDDDDDD DDDDDDDD DDDDDDDD DDDDDDDD
KpixAsic[0]:
enable: 'True'
KpixAsic[1]:
enable: 'True'
KpixAsic[24]:
enable: 'True'
KpixDataRxArray:
enable: 'True'
KpixDataRx[*]:
enable: 'True'
|
<filename>ips_list.yml<gh_stars>10-100
# Copyright 2020 OpenHW Group
# Solderpad Hardware License, Version 2.1, see LICENSE.md for details.
# SPDX-License-Identifier: Apache-2.0 WITH SHL-2.1
# No external dependencies. Everything has been flattened into `rtl/`
|
<gh_stars>10-100
name: ghdl
on:
push:
schedule:
- cron: '0 0 * * 0'
jobs:
synth:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Build
run: make build
- name: Prepare
run: make -C xilinx prepare
- name: Verifying ghdl with examples from Xilinx
run: bash helpers/docker_run.sh make -C xilinx verify-ghdl
|
toplevel: ""
name: qoi
files:
- name: "circular_cam/circular_cam.sv"
file_type: systemVerilogSource
is_include_file: false
include_path: ""
logical_name: circular_cam
- name: "circular_cam/Makefile"
file_type: ""
is_include_file: false
include_path: ""
logical_name: circular_cam
- name: "circular_cam/test_circular_cam.py"
file_type: python
is_include_file: false
include_path: ""
logical_name: circular_cam
- name: "circular_cam/circular_cam.vcd"
file_type: VCD
is_include_file: false
include_path: ""
logical_name: circular_cam
- name: "circular_cam/results.xml"
file_type: XML
is_include_file: false
include_path: ""
logical_name: circular_cam
- name: "full_pixel_encoder/full_pixel_encoder.sv"
file_type: systemVerilogSource
is_include_file: false
include_path: ""
logical_name: full_pixel_encoder
- name: "full_pixel_encoder/Makefile"
file_type: ""
is_include_file: false
include_path: ""
logical_name: full_pixel_encoder
- name: "full_pixel_encoder/test_full_pixel_encoder.py"
file_type: python
is_include_file: false
include_path: ""
logical_name: full_pixel_encoder
- name: "circular_cam/qoi_op_index_encoder.sv"
file_type: systemVerilogSource
is_include_file: false
include_path: ""
logical_name: circular_cam
- name: Makefile
file_type: ""
is_include_file: false
include_path: ""
logical_name: ""
- name: "qoi_encoder.sv"
file_type: systemVerilogSource
is_include_file: false
include_path: ""
logical_name: ""
- name: "test_qoi_encoder.py"
file_type: python
is_include_file: false
include_path: ""
logical_name: ""
tool_options:
cocotb:
installation_path: ""
|
name: "fhw workflow"
on:
# Trigger the workflow on push,
# but only for the main branch
push:
branches:
- main
jobs:
build:
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@master"
- name: "TODO to Issue"
uses: "Tiltedprogrammer/todo-to-issue-action/@v4.0.7.2"
id: "todo"
with:
TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
riscv:
incdirs: [
include,
]
files: [
include/riscv_defines.sv,
include/riscv_tracer_defines.sv,
alu.sv,
alu_div.sv,
check_tag.sv,
compressed_decoder.sv,
controller.sv,
cs_registers.sv,
debug_unit.sv,
decoder.sv,
enable_tag.sv,
exc_controller.sv,
ex_stage.sv,
hwloop_controller.sv,
hwloop_regs.sv,
id_stage.sv,
if_stage.sv,
load_propagation.sv,
load_check.sv,
load_store_unit.sv,
mode_tag.sv,
mult.sv,
prefetch_buffer.sv,
prefetch_L0_buffer.sv,
riscv_core.sv,
tag_propagation_logic.sv,
tag_check_logic.sv,
]
riscv_vip_rtl:
targets: [
rtl,
]
incdirs: [
include,
]
files: [
riscv_tracer.sv,
riscv_simchecker.sv,
]
riscv_regfile_rtl:
targets: [
rtl,
]
incdirs: [
include,
]
files: [
register_file.sv,
register_file_tag.sv,
]
riscv_regfile_fpga:
targets: [
xilinx,
]
incdirs: [
include,
]
files: [
register_file_ff.sv,
register_file_ff_tag.sv,
]
|
theme: jekyll-theme-dinky
title: APB4 GPIO
description: Fully Parameterized APB4 General-Purpose-IO
show_downloads: true
show_license: true
license: Non-Commercial License
|
package:
name: generic_FLL
authors:
- "<NAME> <<EMAIL>>"
sources:
- target: not(synthesis)
files:
- fe/model/gf22_DCO_model.tc.vhd
- fe/model/gf22_FLL_model.vhd
- fe/rtl/FLLPkg.vhd
- fe/rtl/FLL_clk_divider.vhd
- fe/rtl/FLL_clk_period_quantizer.vhd
- fe/rtl/FLL_clock_gated.rtl.vhd
- fe/rtl/FLL_digital.vhd
- fe/rtl/FLL_dither_pattern_gen.vhd
- fe/rtl/FLL_glitchfree_clkdiv.vhd
- fe/rtl/FLL_glitchfree_clkmux.vhd
- fe/rtl/FLL_mux.rtl.vhd
- fe/rtl/FLL_loop_filter.vhd
- fe/rtl/FLL_reg.vhd
- fe/rtl/FLL_settling_monitor.vhd
- fe/rtl/FLL_synchroedge.vhd
- fe/rtl/FLL_zerodelta.vhd
|
zeroriscy:
incdirs: [
include,
]
files: [
include/zeroriscy_defines.sv,
include/zeroriscy_tracer_defines.sv,
zeroriscy_alu.sv,
zeroriscy_compressed_decoder.sv,
zeroriscy_controller.sv,
zeroriscy_cs_registers.sv,
zeroriscy_debug_unit.sv,
zeroriscy_decoder.sv,
zeroriscy_int_controller.sv,
zeroriscy_ex_block.sv,
zeroriscy_id_stage.sv,
zeroriscy_if_stage.sv,
zeroriscy_load_store_unit.sv,
zeroriscy_multdiv_slow.sv,
zeroriscy_multdiv_fast.sv,
zeroriscy_prefetch_buffer.sv,
zeroriscy_fetch_fifo.sv,
zeroriscy_core.sv,
]
zeroriscy_vip_rtl:
targets: [
rtl,
]
incdirs: [
include,
]
files: [
include/zeroriscy_defines.sv,
include/zeroriscy_tracer_defines.sv,
zeroriscy_tracer.sv,
]
zeroriscy_regfile_rtl:
targets: [
rtl,
tsmc55,
gf22
]
incdirs: [
include,
]
files: [
zeroriscy_register_file.sv,
]
zeroriscy_regfile_fpga:
targets: [
xilinx,
]
incdirs: [
include,
]
files: [
zeroriscy_register_file_ff.sv,
]
|
<reponame>wonwooddo/vim_setup
sudo: required
services: docker
env:
- VIM_VERSION="7.4" PYTHON_IMAGE=2.7-stretch TAG=vim_74_py2
- VIM_VERSION="8.0" PYTHON_IMAGE=2.7-stretch TAG=vim_80_py2
- VIM_VERSION="git" PYTHON_IMAGE=2.7-stretch TAG=vim_git_py2
- VIM_VERSION="7.4" PYTHON_IMAGE=3.6-stretch TAG=vim_74_py3
- VIM_VERSION="8.0" PYTHON_IMAGE=3.6-stretch TAG=vim_80_py3
- VIM_VERSION="git" PYTHON_IMAGE=3.6-stretch TAG=vim_git_py3
install:
- docker build -t ultisnips:${TAG} --build-arg PYTHON_IMAGE=${PYTHON_IMAGE} --build-arg VIM_VERSION=${VIM_VERSION} .
script:
- docker run -it ultisnips:${TAG} docker/run_tests.sh
notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/558acac434012ba838cd
on_success: change # options: [always|never|change] default: always
on_failure: always # options: [always|never|change] default: always
on_start: false # default: false
|
<filename>src/main/resources/config/application-jenkins.yml<gh_stars>1-10
# ===================================================================
# Jenkins specific properties: this file will only be loaded during startup if the profile jenkins is active
#
# This configuration overrides the application.yml file.
# ===================================================================
jenkins:
  # The following (optional) parameter lets you choose whether Jenkins CSRF protection should be used (activated) within Artemis:
# see https://wiki.jenkins.io/display/JENKINS/Remote+access+API --> CSRF Protection
# The default value for this parameter is true, so if it is not specified, crumb protection will be active.
# If you experience problems using crumb, you can deactivate it here.
use-crumb: true
|
<gh_stars>1-10
dut : dut_mio_sim.yaml
dut_configuration : ""
working_dir :
modules :
module_0 :
activate : True
configuration : # FE configuration file, text (.cfg) or HDF5 (.h5) file. If no value is given, the latest valid configuration (run status 'FINISHED') will be taken. If a number is given, the configuration from the run with the specified number will be taken.
flavor : fei4a # FEI4 flavor/type for initial configuration. Valid values are: 'fei4a', 'fei4b'.
    chip_address : 0 # Chip address for initial configuration. If no value is given, the broadcast bit will be set.
FIFO : SRAM_FIFO # As implemented in the firmware.
RX : DATA_CH4 # As implemented in the firmware.
rx_channel : 4 # As implemented in the firmware.
TX : CMD_CH1_TO_CH4 # As implemented in the firmware.
tx_channel : 0 # As implemented in the firmware.
TDC: TDC_RX2 # As implemented in the firmware.
tdc_channel : 4 # As implemented in the firmware.
TLU : TRIGGER_CH1_TO_CH4 # As implemented in the firmware.
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
description: yolov3 detection on voc dataset.
input size: 416*416
float ops: 65.42G
task: detection
framework: darknet
prune: 'no'
version: 2.0
files:
- name: dk_yolov3_voc_416_416_65.42G_2.0
type: float & quantized
board: GPU
download link: https://www.xilinx.com/bin/public/openDownload?filename=dk_yolov3_voc_416_416_65.42G_2.0.zip
checksum: a5132dbcce3f778f21d102e5a5b18051
- name: yolov3_voc
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=yolov3_voc-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 3530899a0c02020dc05a0b561596767a
- name: yolov3_voc
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=yolov3_voc-vck190-r2.0.0.tar.gz
checksum: 939c86bb1e05934333922e29d44dff60
- name: yolov3_voc
type: xmodel
board: vck50006pe-DPUCVDX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=yolov3_voc-vck50006pe-DPUCVDX8H-DWC-r2.0.0.tar.gz
checksum: fa1f32e112cbc41639cc57077d854bc2
- name: yolov3_voc
type: xmodel
board: vck50008pe-DPUCVDX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=yolov3_voc-vck50008pe-DPUCVDX8H-r2.0.0.tar.gz
checksum: 1bd0fba589b85c621e7b059e095a28bf
- name: yolov3_voc
type: xmodel
board: u50lv-DPUCAHX8H
download link: https://www.xilinx.com/bin/public/openDownload?filename=yolov3_voc-u50lv-DPUCAHX8H-r2.0.0.tar.gz
checksum: 262e896228380a7d4aff40c777543ace
- name: yolov3_voc
type: xmodel
board: u50lv-DPUCAHX8H-DWC & u55c-DPUCAHX8H-DWC
download link: https://www.xilinx.com/bin/public/openDownload?filename=yolov3_voc-u55c-u50lv-DPUCAHX8H-DWC-r2.0.0.tar.gz
checksum: 2af6b858c0544039bad19425da73b7d7
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE
|
#
# code.yml
#
# Copyright (C) 2019-2021 ETH Zurich, University of Bologna and GreenWaves Technologies
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# mnemonics to simplify microcode writing
mnemonics:
base_addr_W: 0
base_addr_x: 1
base_addr_y: 2
base_addr_s: 3
weights_kom_iter: 4
weights_kim_iter: 5
weights_kom_reset_iter: 6
weights_kim_reset_iter: 7
infeat_kim_iter: 8
infeat_wom_iter: 9
infeat_hom_iter: 10
infeat_kim_reset_iter: 11
infeat_wom_reset_iter: 12
infeat_hom_reset_iter: 13
outfeat_wom_iter: 14
outfeat_hom_iter: 15
outfeat_kom_iter: 16
outfeat_wom_reset_iter: 17
outfeat_hom_reset_iter: 18
outfeat_kom_reset_iter: 19
scale_kom_iter: 20
zero: 21
# NE16 code
code:
k_in_major:
- { op : add, a: base_addr_W, b: weights_kim_iter }
- { op : add, a: base_addr_x, b: infeat_kim_iter }
j_major:
- { op : add, a: base_addr_W, b: weights_kim_reset_iter } # weights_kim_reset_iter = - subtile_nb_ki * weights_kim_iter
- { op : add, a: base_addr_x, b: infeat_kim_reset_iter } # infeat_kim_reset_iter = - subtile_nb_ki * infeat_kim_iter
- { op : add, a: base_addr_x, b: infeat_wom_iter }
- { op : add, a: base_addr_y, b: outfeat_wom_iter }
i_major:
- { op : add, a: base_addr_x, b: infeat_wom_reset_iter } # infeat_wom_reset_iter = - subtile_nb_wo * infeat_wom_iter
- { op : add, a: base_addr_y, b: outfeat_wom_reset_iter } # outfeat_wom_reset_iter = - subtile_nb_wo * outfeat_wom_iter
- { op : add, a: base_addr_W, b: weights_kim_reset_iter } # weights_kim_reset_iter = - subtile_nb_ki * weights_kim_iter
- { op : add, a: base_addr_x, b: infeat_kim_reset_iter } # infeat_kim_reset_iter = - subtile_nb_ki * infeat_kim_iter
- { op : add, a: base_addr_x, b: infeat_hom_iter }
- { op : add, a: base_addr_y, b: outfeat_hom_iter }
k_out_major:
- { op : add, a: base_addr_x, b: infeat_hom_reset_iter } # infeat_hom_reset_iter = - subtile_nb_ho * infeat_hom_iter
- { op : add, a: base_addr_y, b: outfeat_hom_reset_iter } # outfeat_hom_reset_iter = - subtile_nb_ho * outfeat_hom_iter
- { op : add, a: base_addr_x, b: infeat_wom_reset_iter } # infeat_wom_reset_iter = - subtile_nb_wo * infeat_wom_iter
- { op : add, a: base_addr_y, b: outfeat_wom_reset_iter } # outfeat_wom_reset_iter = - subtile_nb_wo * outfeat_wom_iter
- { op : add, a: base_addr_W, b: weights_kim_reset_iter } # weights_kim_reset_iter = - subtile_nb_ki * weights_kim_iter
- { op : add, a: base_addr_x, b: infeat_kim_reset_iter } # infeat_kim_reset_iter = - subtile_nb_ki * infeat_kim_iter
- { op : add, a: base_addr_W, b: weights_kom_iter }
- { op : add, a: base_addr_y, b: outfeat_kom_iter }
- { op : add, a: base_addr_s, b: scale_kom_iter }
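    # The *_reset_iter values rewind a pointer once an inner loop finishes; per the
    # comments above they are precomputed by the driver roughly as (illustrative
    # Python, variable names assumed):
    #   weights_kim_reset_iter = -subtile_nb_ki * weights_kim_iter
    #   infeat_wom_reset_iter  = -subtile_nb_wo * infeat_wom_iter
    #   outfeat_hom_reset_iter = -subtile_nb_ho * outfeat_hom_iter
    # i.e. a single 'add' of the reset value undoes subtile_nb_* forward steps.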
|
<filename>doc/ja/akane/05_bitonic_sorter.yml
---
input_file : 05_bitonic_sorter.akd
output_file : ../05_bitonic_sorter.md
image_url :
"Fig.1 バイトニックソートのソーティングネットワーク例" : "image/05_bitonic_sorter_1.jpg"
link_list :
- id : "「はじめに」"
title: "「VHDL で書くマージソーター(はじめに)」"
url : "./01_introduction.md"
- id : "「ワードの定義」"
title: "「VHDL で書くマージソーター(ワードの定義)」"
url : "./02_word_package.md"
- id : "「ワード比較器」"
title: "「VHDL で書くマージソーター(ワード比較器)」"
url : "./03_word_compare.md"
- id : "「ソーティングネットワーク」"
title: "「VHDL で書くマージソーター(ソーティングネットワーク)」"
url : "./04_sorting_network.md"
- id : "「バイトニックマージソート」"
title: "「VHDL で書くマージソーター(バイトニックマージソート)」"
url : "./05_bitonic_sorter.md"
- id : "「バッチャー奇偶マージソート」"
title: "「VHDL で書くマージソーター(バッチャー奇偶マージソート)」"
url : "./06_oddeven_sorter.md"
- id : "「シングルワード マージソート ノード」"
title: "「VHDL で書くマージソーター(シングルワード マージソート ノード)」"
url : "./07_merge_sort_node_single.md"
- id : "「マルチワード マージソート ノード」"
title: "「VHDL で書くマージソーター(マルチワード マージソート ノード)」"
url : "./08_merge_sort_node_multi.md"
- id : "「マージソート ツリー」"
title: "「VHDL で書くマージソーター(マージソート ツリー)」"
url : "./09_merge_sort_tree.md"
- id : "「端数ワード処理」"
title: "「VHDL で書くマージソーター(端数ワード処理)」"
url : "./10_merge_sort_core_1.md"
- id : "「ストリーム入力」"
title: "「VHDL で書くマージソーター(ストリーム入力)」"
url : "./11_merge_sort_core_2.md"
- id : "「ストリームフィードバック」"
title: "「VHDL で書くマージソーター(ストリームフィードバック)」"
url : "./12_merge_sort_core_3.md"
- id : "「ArgSort IP」"
title: "「VHDL で書くマージソーター(ArgSort IP)」"
url : "./13_argsort.md"
- id : "「ArgSort-Ultra96」"
title: "「VHDL で書くマージソーター(ArgSort-Ultra96)」"
url : "https://github.com/ikwzm/ArgSort-Ultra96/blob/1.2.1/doc/ja/argsort-ultra96.md"
- id : "「ArgSort-Kv260」"
title: "「VHDL で書くマージソーター(ArgSort-Kv260)」"
url : "https://github.com/ikwzm/ArgSort-Kv260/blob/1.2.1/doc/ja/argsort-Kv260.md"
- id : "Wikipedia/Bitonic_sorter"
title: "Wikipedia/Bitonic_sorter"
url : "https://ja.wikipedia.org/wiki/%E3%83%90%E3%82%A4%E3%83%88%E3%83%8B%E3%83%83%E3%82%AF%E3%82%BD%E3%83%BC%E3%83%88"
---
|
<reponame>mfkiwl/axi<gh_stars>1-10
package:
name: axi
authors:
- "<NAME> <<EMAIL>>" # current maintainer
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
- "<NAME> <<EMAIL>>"
dependencies:
common_cells: { git: "https://github.com/pulp-platform/common_cells.git", version: 1.21.0 }
common_verification: { git: "https://github.com/pulp-platform/common_verification.git", version: 0.2.0 }
export_include_dirs:
- include
sources:
# Source files grouped in levels. Files in level 0 have no dependencies on files in this
# package. Files in level 1 only depend on files in level 0, files in level 2 on files in
# levels 1 and 0, etc. Files within a level are ordered alphabetically.
# Level 0
- src/axi_pkg.sv
# Level 1
- src/axi_intf.sv
# Level 2
- src/axi_atop_filter.sv
- src/axi_burst_splitter.sv
- src/axi_cdc_dst.sv
- src/axi_cdc_src.sv
- src/axi_cut.sv
- src/axi_delayer.sv
- src/axi_demux.sv
- src/axi_dw_downsizer.sv
- src/axi_dw_upsizer.sv
- src/axi_id_prepend.sv
- src/axi_isolate.sv
- src/axi_join.sv
- src/axi_lite_demux.sv
- src/axi_lite_join.sv
- src/axi_lite_mailbox.sv
- src/axi_lite_mux.sv
- src/axi_lite_regs.sv
- src/axi_lite_to_apb.sv
- src/axi_lite_to_axi.sv
- src/axi_modify_address.sv
- src/axi_mux.sv
- src/axi_serializer.sv
# Level 3
- src/axi_cdc.sv
- src/axi_err_slv.sv
- src/axi_dw_converter.sv
- src/axi_multicut.sv
- src/axi_to_axi_lite.sv
# Level 4
- src/axi_lite_xbar.sv
- src/axi_xbar.sv
- target: synth_test
files:
- test/axi_synth_bench.sv
- target: simulation
files:
- src/axi_sim_mem.sv
- src/axi_test.sv
- target: test
files:
# Level 0
- test/tb_axi_dw_pkg.sv
- test/tb_axi_xbar_pkg.sv
# Level 1
- test/tb_axi_addr_test.sv
- test/tb_axi_atop_filter.sv
- test/tb_axi_cdc.sv
- test/tb_axi_delayer.sv
- test/tb_axi_dw_downsizer.sv
- test/tb_axi_dw_upsizer.sv
- test/tb_axi_isolate.sv
- test/tb_axi_lite_mailbox.sv
- test/tb_axi_lite_regs.sv
- test/tb_axi_lite_to_apb.sv
- test/tb_axi_lite_to_axi.sv
- test/tb_axi_lite_xbar.sv
- test/tb_axi_modify_address.sv
- test/tb_axi_serializer.sv
- test/tb_axi_sim_mem.sv
- test/tb_axi_to_axi_lite.sv
- test/tb_axi_xbar.sv
|
<gh_stars>0
ibex:
incdirs: [
]
files: [
ibex_defines.sv,
ibex_tracer_defines.sv,
ibex_alu.sv,
ibex_compressed_decoder.sv,
ibex_controller.sv,
ibex_cs_registers.sv,
ibex_debug_unit.sv,
ibex_decoder.sv,
ibex_int_controller.sv,
ibex_ex_block.sv,
ibex_id_stage.sv,
ibex_if_stage.sv,
ibex_load_store_unit.sv,
ibex_multdiv_slow.sv,
ibex_multdiv_fast.sv,
ibex_prefetch_buffer.sv,
ibex_fetch_fifo.sv,
ibex_core.sv,
]
ibex_vip_rtl:
targets: [
rtl,
]
incdirs: [
]
files: [
ibex_defines.sv,
ibex_tracer_defines.sv,
ibex_tracer.sv,
]
ibex_regfile_rtl:
targets: [
rtl,
tsmc55,
gf22
]
incdirs: [
]
files: [
ibex_register_file_latch.sv,
]
ibex_regfile_fpga:
targets: [
xilinx,
]
incdirs: [
]
files: [
ibex_register_file_ff.sv,
]
|
<filename>Tools/JohnTheRipper-bleeding-jumbo/.travis.yml<gh_stars>1000+
language: c
dist: trusty
services: docker
env:
global:
# The next declration is the encrypted COVERITY_SCAN_TOKEN, created
# via the "travis encrypt" command using the project repo's public key
- secure: "<KEY>
matrix:
include:
- os: linux
compiler: gcc
env: ASAN="" OPENCL="yes"
- os: linux
compiler: clang
env: ASAN=""
- os: linux
compiler: gcc
env: ASAN="--enable-asan"
- os: linux
env: ASAN="--enable-asan" TEST="fresh test"
group: deprecated-2017Q3
- os: osx
osx_image: xcode8.3
env: ASAN="" OPENCL="yes"
allow_failures:
- os: linux
compiler: gcc
env: ASAN="--enable-asan"
- os: linux
env: ASAN="--enable-asan" TEST="fresh test"
- os: osx
osx_image: xcode8.3
env: ASAN="" OPENCL="yes"
fast_finish: true
script:
- .travis/check.sh
addons:
coverity_scan:
project:
name: "magnumripper/JohnTheRipper"
description: "http://openwall.com/john"
notification_email: <EMAIL>
build_command_prepend:
build_command: "cd src && ./configure && make -sj4 && echo -e '[Disabled:Formats]\nRaw-SHA512-free-opencl = Y\nXSHA512-free-opencl = Y' > john-local.conf && ../run/john -test-full=0"
branch_pattern: coverity_scan
|
<reponame>sarnold/chiptools<gh_stars>0
name: Smoke
on:
workflow_dispatch:
pull_request:
push:
branches:
- master
- develop
jobs:
tests:
runs-on: ${{ matrix.os }}
defaults:
run:
shell: bash
env:
OS: ${{ matrix.os }}
PYTHON: ${{ matrix.python-version }}
PYTHONIOENCODING: utf-8
PIP_DOWNLOAD_CACHE: ${{ github.workspace }}/../.pip_download_cache
strategy:
fail-fast: false
matrix:
os: [ubuntu-20.04]
python-version: [3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
submodules: true
- name: Install Ubuntu tool deps
run: |
sudo apt-get -qq update
sudo apt-get install -qq -y iverilog ghdl graphviz
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Add python requirements
run: |
python -m pip install --upgrade pip
pip install tox tox-gh-actions coverage coverage_python_version
- name: Run tests
run: |
tox
env:
PLATFORM: ${{ matrix.os }}
- name: Build sdist/wheel
run: |
tox -e deploy
- name: Upload artifacts
uses: actions/upload-artifact@v2
with:
path: ./dist
|
<gh_stars>1-10
apb_aes:
incdirs: [
.,
]
files: [
prim_assert_standard_macros.svh,
prim_assert.sv,
prim_subreg_pkg.sv,
prim_util_pkg.sv,
prim_pkg.sv,
prim_cipher_pkg.sv,
entropy_src_pkg.sv,
edn_pkg.sv,
keymgr_reg_pkg.sv,
keymgr_pkg.sv,
lc_ctrl_state_pkg.sv,
lc_ctrl_pkg.sv,
aes_sbox_canright_pkg.sv,
aes_reg_pkg.sv,
aes_pkg.sv,
prim_generic_buf.sv,
prim_buf.sv,
prim_subreg_ext.sv,
prim_subreg_arb.sv,
prim_subreg_shadow.sv,
prim_subreg.sv,
prim_lfsr.sv,
prim_sparse_fsm_flop.sv,
prim_flop.sv,
prim_generic_flop.sv,
prim_flop_en.sv,
prim_generic_flop_en.sv,
apb_aes_reg_top.sv,
aes_ctrl_reg_shadowed.sv,
aes_sel_buf_chk.sv,
aes_ctr_fsm.sv,
aes_ctr_fsm_n.sv,
aes_ctr_fsm_p.sv,
aes_ctr.sv,
aes_cipher_core.sv,
aes_prng_masking.sv,
aes_prng_clearing.sv,
aes_sub_bytes.sv,
aes_sbox.sv,
aes_sbox_canright.sv,
aes_sbox_lut.sv,
aes_sbox_dom.sv,
aes_sbox_canright_masked_noreuse.sv,
aes_sbox_canright_masked.sv,
aes_shift_rows.sv,
aes_mix_columns.sv,
aes_mix_single_column.sv,
aes_key_expand.sv,
aes_cipher_control.sv,
aes_cipher_control_fsm_p.sv,
aes_cipher_control_fsm_n.sv,
aes_cipher_control_fsm.sv,
aes_control.sv,
aes_control_fsm_p.sv,
aes_control_fsm_n.sv,
aes_control_fsm.sv,
aes_reg_status.sv,
aes_core.sv,
apb_aes.sv,
]
|
<reponame>Nic30/hwtLib
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
build:
image: latest
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: doc/conf.py
conda:
environment: doc/environment.yml
# Optionally build your docs in additional formats such as PDF
formats:
- htmlzip
|
<reponame>LarsAsplund/vunit_tdd
name: Tests
on:
push:
pull_request:
workflow_dispatch:
schedule:
- cron: '0 0 * * 6'
jobs:
# GNU/Linux: VUnit Action (uses Docker image ghdl/vunit:mcode)
lin-vunit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: VUnit/vunit_action@master
with:
cmd: tdd/run.py
# GNU/Linux: Custom Docker image
lin-docker:
runs-on: ubuntu-latest
env:
DOCKER_BUILDKIT: 1
steps:
- uses: actions/checkout@v2
- run: docker build -t vunit/tdd - < .github/Dockerfile
- run: >-
docker run --rm
-v $(pwd):/src
-w /src
-e CI
vunit/tdd
python3 -m pytest -v -s -ra tdd/test_tdd.py tutorial/test_tutorial.py --color=yes
# GNU/Linux: GHDL Action + custom Python packages
lin-setup:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: ghdl/setup-ghdl-ci@master
with:
backend: llvm
- uses: actions/setup-python@v2
with:
python-version: 3.8
- run: |
python -m pip install --progress-bar off pytest vunit_hdl
python -m pytest -v -s -ra tdd/test_tdd.py tutorial/test_tutorial.py --color=yes
# Windows: MSYS2 Action + GHDL Action + custom Python packages
win-setup:
runs-on: windows-latest
defaults:
run:
shell: msys2 {0}
steps:
- uses: msys2/setup-msys2@v2
with:
msystem: MINGW64
update: true
install: mingw-w64-x86_64-python-pip
- uses: actions/checkout@v2
- uses: ghdl/setup-ghdl-ci@master
with:
backend: llvm
- run: |
python -m pip install --progress-bar off pytest vunit_hdl
python -m pytest -v -s -ra tdd/test_tdd.py tutorial/test_tutorial.py --color=yes
# Windows: standalone GHDL zipfile/tarball + custom Python packages
win-stable:
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.8
- env:
WINDOWS_RELEASE: 0.37-mingw64-llvm
shell: bash
run: |
curl -fsSL -o ghdl.zip https://github.com/ghdl/ghdl/releases/download/v0.37/ghdl-${WINDOWS_RELEASE}.zip
7z x ghdl.zip "-o../ghdl-tmp" -y
mv ../ghdl-tmp/GHDL/${WINDOWS_RELEASE}/ ../ghdl
rm -rf ../ghdl-tmp ghdl.zip
export PATH=$PATH:$(pwd)/../ghdl/bin
python -m pip install --progress-bar off pytest vunit_hdl
python -m pytest -v -s -ra tdd/test_tdd.py tutorial/test_tutorial.py --color=yes
|
sim.inputs.top_module: "hdc_sensor_fusion"
sim.inputs.tb_dut: "dut"
sim.inputs.tb_name: "hdc_sensor_fusion_tb"
sim.inputs.input_files_meta: "append"
sim.inputs.input_files:
- "src/HDC_Sensor_Fusion_SmallRule90/hdc_sensor_fusion.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/hdc_sensor_fusion_tb.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/associative_memory.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/hv2000_binary_adder.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/fuser.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/spatial_encoder.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/temporal_encoder.sv"
- "src/HDC_Sensor_Fusion_SmallRule90/hv_generator.sv"
sim.inputs:
timescale: "1ps/1ps"
options:
- "-notice"
- "-line"
- "-debug_pp"
- "-debug_all"
- "+v2k"
- "+lint=all,noVCDE"
- "+incdir+../../src/HDC_Sensor_Fusion_SmallRule90"
- "+define+CLOCK_PERIOD=0.01"
- "-sverilog"
execute_sim: true
execution_flags: ["+verbose=1"]
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
description: CLOCs on KITTI
task: Sensor-fusion with 2d detection and 3d detection
framework: pytorch
prune: 'no'
version: 2.0
part 1: PointPillars of CLOCs
task 1: 3d detection for sensor fusion
input size 1: 12000*100*4
float ops 1: 9.7G
part 2: YOLOX of CLOCs
task 2: 2d detection for sensor fusion
input size 2: 384*1248*3
float ops 2: 30.6G
part 3: FusionNet of CLOCs
task 3: fusion network for sensor fusion
input size 3: 800*1000*4
float ops 3: 0.56G
files:
- name: pt_CLOCs_kitti_2.0
type: float & quantized
board: GPU
download link: https://www.xilinx.com/bin/public/openDownload?filename=pt_CLOCs_kitti_2.0.zip
checksum: 93677761ce9dc19d499c105234c3b32a
- name: clocs_pointpillars_kitti_0_pt
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_pointpillars_kitti_0_pt-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 10f613b9fefc271662c3131fdc1cbf9f
- name: clocs_pointpillars_kitti_1_pt
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_pointpillars_kitti_1_pt-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 56a386ffb97ede607edca13cee6ff890
- name: clocs_yolox_pt
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_yolox_pt-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 4ddf75f9036a62d329f04ddc64666ee4
- name: clocs_fusion_cnn_pt
type: xmodel
board: zcu102 & zcu104 & kv260
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_fusion_cnn_pt-zcu102_zcu104_kv260-r2.0.0.tar.gz
checksum: 05eac12b98d2689339b677b43d106ef6
- name: clocs_pointpillars_kitti_0_pt
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_pointpillars_kitti_0_pt-vck190-r2.0.0.tar.gz
checksum: 7435bc4f6a1958eb5850b1dfa1abcd8b
- name: clocs_pointpillars_kitti_1_pt
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_pointpillars_kitti_1_pt-vck190-r2.0.0.tar.gz
checksum: da59cd949a6b385603fa72d7c07ec70f
- name: clocs_yolox_pt
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_yolox_pt-vck190-r2.0.0.tar.gz
checksum: 9528553d7f5bd9fd6eec5d644f604c13
- name: clocs_fusion_cnn_pt
type: xmodel
board: vck190
download link: https://www.xilinx.com/bin/public/openDownload?filename=clocs_fusion_cnn_pt-vck190-r2.0.0.tar.gz
checksum: 22df214873991413b318c77b337eeacf
license: https://github.com/Xilinx/Vitis-AI/blob/master/LICENSE
|
on:
push:
branches:
- master
jobs:
docs:
name: Build documentation
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
- name: Set up Python
uses: actions/setup-python@v1
- name: Install dependencies
run: |
python3 -m pip install --upgrade pip
pip install -r doc/requirements.txt
- name: Build sphinx documentation
run: |
make -C doc html
- name: Deploy to gh-pages
        uses: JamesIves/github-pages-deploy-action@v4
with:
branch: gh-pages # The branch the action should deploy to.
folder: doc/_build/html # The folder the action should deploy.
|
<gh_stars>0
template: "../doc/core/template.lyx"
source_list:
bus_sampling:
vhdl_file: "../src/bus_sampling/bus_sampling.vhd"
lyx_output: "../doc/core/entity_docs/bus_sampling.lyx"
fault_confinement:
vhdl_file: "../src/can_core/fault_confinement.vhd"
lyx_output: "../doc/core/entity_docs/fault_confinement.lyx"
protocol_control:
vhdl_file: "../src/can_core/protocol_control.vhd"
lyx_output: "../doc/core/entity_docs/protocol_control.lyx"
protocol_control_fsm:
vhdl_file: "../src/can_core/protocol_control_fsm.vhd"
lyx_output: "../doc/core/entity_docs/protocol_control_fsm.lyx"
can_core:
vhdl_file: "../src/can_core/can_core.vhd"
lyx_output: "../doc/core/entity_docs/can_core.lyx"
can_top_level:
vhdl_file: "../src/can_top_level.vhd"
lyx_output: "../doc/core/entity_docs/can_top_level.lyx"
operation_control:
vhdl_file: "../src/can_core/operation_control.vhd"
lyx_output: "../doc/core/entity_docs/operation_control.lyx"
bit_stuffing:
vhdl_file: "../src/can_core/bit_stuffing.vhd"
lyx_output: "../doc/core/entity_docs/bit_stuffing.lyx"
bit_destuffing:
vhdl_file: "../src/can_core/bit_destuffing.vhd"
lyx_output: "../doc/core/entity_docs/bit_destuffing.lyx"
can_crc:
vhdl_file: "../src/can_core/can_crc.vhd"
lyx_output: "../doc/core/entity_docs/can_crc.lyx"
prescaler:
vhdl_file: "../src/prescaler/prescaler.vhd"
lyx_output: "../doc/core/entity_docs/prescaler.lyx"
frame_filters:
vhdl_file: "../src/frame_filters/frame_filters.vhd"
lyx_output: "../doc/core/entity_docs/frame_filters.lyx"
memory_registers:
vhdl_file: "../src/memory_registers/memory_registers.vhd"
lyx_output: "../doc/core/entity_docs/memory_registers.lyx"
rx_buffer:
vhdl_file: "../src/rx_buffer/rx_buffer.vhd"
lyx_output: "../doc/core/entity_docs/rx_buffer.lyx"
txt_buffer:
vhdl_file: "../src/txt_buffer/txt_buffer.vhd"
lyx_output: "../doc/core/entity_docs/txt_buffer.lyx"
tx_arbitrator:
vhdl_file: "../src/tx_arbitrator/tx_arbitrator.vhd"
lyx_output: "../doc/core/entity_docs/tx_arbitrator.lyx"
int_manager:
vhdl_file: "../src/interrupt_manager/int_manager.vhd"
lyx_output: "../doc/core/entity_docs/int_manager.lyx"
trigger_mux:
vhdl_file: "../src/can_core/trigger_mux.vhd"
lyx_output: "../doc/core/entity_docs/trigger_mux.lyx"
|
steps:
- label: "Pruning docker images"
commands:
- yes | docker image prune -a --filter "until=24h" --filter=label='description=garnet' || true
- wait
- label: ":wrench: Build and Test Garnet Unit Test"
commands:
- source /aha/bin/activate
- source /cad/modules/tcl/init/sh
- module load base xcelium/19.03.003 vcs
- rm -rf /aha/garnet
- cp -r /workdir /aha/garnet
- echo "--- Install requirements"
- pip install -r /aha/garnet/requirements.txt
- pip install pytest-cov pytest-pycodestyle z3-solver genesis2 coveralls
- echo "--- Running CI tests"
- cd /aha/garnet && bash .github/scripts/run.sh
plugins:
- docker#v3.2.0:
image: stanfordaha/garnet
volumes:
- "/cad/:/cad"
shell: ["/bin/bash", "-e", "-c"]
agents:
docker: true
|
<gh_stars>1-10
language: bash
dist: focal
before_install:
- sudo apt-get install iverilog
- sudo apt-get install yosys
script:
- ./flow.sh sim
|
language: cpp
compiler: gcc
dist: trusty
sudo: required
before_install:
- sudo apt-get -qq update
- sudo apt-get install -y verilator golang-go
script: make verilator && make run-all-tests
|
# Adapted from Garnet
name: openram-gen-sram-small
commands:
- |
### Option 1 (slow): Run OpenRAM
    ## generate config file for OpenRAM
#python gen_config.py
## run OpenRAM
#python $OPENRAM_HOME/openram.py myconfig
    ## fix metal names in LEF file
    ## (-E enables extended regexps, so '\s+' matches a run of whitespace; in
    ##  basic-regexp sed the '+' would be treated literally)
    #cd temp
    #sed -i -E 's/LAYER\s+m1/LAYER metal1/g' *.lef
    #sed -i -E 's/LAYER\s+m2/LAYER metal2/g' *.lef
    #sed -i -E 's/LAYER\s+m3/LAYER metal3/g' *.lef
    #sed -i -E 's/LAYER\s+m4/LAYER metal4/g' *.lef
#cd ..
### Option 2 (fast): Download pre-compiled SRAM
# Download tarball from GitHub
wget https://github.com/StanfordVLSI/dragonphy2/releases/download/v0.0.3/sram_64_256_freepdk45.tar.gz
# Untar
tar xzvf sram_64_256_freepdk45.tar.gz
# Rename folder to match output of OpenRAM
mv sram_64_256_freepdk45 temp
# Link the outputs needed for mflowgen
mkdir -p outputs
cd outputs
ln -s ../temp/*.lib sram_small_tt.lib
ln -s ../temp/*.lef sram_small.lef
ln -s ../temp/*.gds sram_small.gds
ln -s ../temp/*.sp sram_small.spi
cd ..
# run script to generate a *.db file from the *.lib model
mkdir -p build
cd build
dc_shell-xg-t -f ../generate_db.tcl
cd ..
parameters:
sram_word_size: 64
sram_num_words: 256
sram_tech_name: freepdk45
sram_output_path: temp
outputs:
- sram_small_tt.lib
- sram_small.lef
- sram_small.gds
- sram_small.spi
- sram_small_tt.db
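# For reference, `generate_db.tcl` above is expected to be a short dc_shell /
# Library Compiler script along these lines (a hedged sketch; the library
# name passed to write_lib is hypothetical and must match the name declared
# inside the generated .lib file):
#
#   read_lib ../outputs/sram_small_tt.lib
#   write_lib sram_64_256_freepdk45_lib -format db -output ../outputs/sram_small_tt.db
#   exit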
|
<gh_stars>10-100
# Copyright 2020 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package:
name: riscv
dependencies:
fpnew: { git: "https://github.com/pulp-platform/fpnew.git", version: 0.6.1 }
common_cells: { git: "https://github.com/pulp-platform/common_cells.git", version: 1.16.4 }
tech_cells_generic: { git: "https://github.com/pulp-platform/tech_cells_generic.git", version: 0.1.1 }
sources:
include_dirs:
- rtl/include
files:
- rtl/include/cv32e41p_apu_core_pkg.sv
- rtl/include/cv32e41p_pkg.sv
- rtl/cv32e41p_alu.sv
- rtl/cv32e41p_alu_div.sv
- rtl/cv32e41p_aligner.sv
- rtl/cv32e41p_compressed_decoder.sv
- rtl/cv32e41p_controller.sv
- rtl/cv32e41p_cs_registers.sv
- rtl/cv32e41p_decoder.sv
- rtl/cv32e41p_int_controller.sv
- rtl/cv32e41p_ex_stage.sv
- rtl/cv32e41p_hwloop_controller.sv
- rtl/cv32e41p_hwloop_regs.sv
- rtl/cv32e41p_id_stage.sv
- rtl/cv32e41p_if_stage.sv
- rtl/cv32e41p_load_store_unit.sv
- rtl/cv32e41p_mult.sv
- rtl/cv32e41p_prefetch_buffer.sv
- rtl/cv32e41p_obi_interface.sv
- rtl/cv32e41p_core.sv
- rtl/cv32e41p_apu_disp.sv
- rtl/cv32e41p_fetch_fifo.sv
- rtl/cv32e41p_popcnt.sv
- rtl/cv32e41p_ff_one.sv
- rtl/cv32e41p_sleep_unit.sv
- target: asic
files:
- rtl/cv32e41p_register_file_latch.sv
- target: not(asic)
files:
- rtl/cv32e41p_register_file_ff.sv
- target: rtl
files:
- bhv/cv32e41p_sim_clock_gate.sv
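# With this manifest in place, the PULP Bender tool can resolve the pinned
# dependencies and emit tool scripts (illustrative usage; the -t flags select
# the `target:` keys defined above):
#
#   bender update
#   bender script vsim -t rtl > compile.tcl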
|
<reponame>AdamChristiansen/fpga_ethernet_udp<filename>ether_tester/src/app.yml<gh_stars>10-100
name: Ethernet Tester
about: Tests the Ethernet stream from an FPGA. The FPGA must be configured to stream pseudo-RNG sequences.
args:
- bytes:
value_name: BYTES
short: b
long: bytes
help: The number of bytes to generate for a single test packet.
required: true
takes_value: true
- dest:
value_name: DEST
short: d
long: dest
help: The IP address, port, and MAC address of the host, in the format `iii.iii.iii.iii:ppppp,mm:mm:mm:mm:mm:mm`, where the `i`s are the IP address, the `p`s the port, and the `m`s the MAC address.
required: true
takes_value: true
- no-socket:
short: n
long: no-socket
help: Do not bind a socket to the port. This is useful when another program reads the data on the socket.
multiple: true
global: true
- reps:
value_name: REPS
short: r
long: reps
help: The number of repetitions of the test to run.
required: true
takes_value: true
- serial-port:
value_name: SERIAL_PORT
short: p
long: serial-port
help: The name of the serial port to use and its baud rate. The port name and the baud rate are separated by a colon `:`.
required: true
takes_value: true
- show-all:
short: a
long: show-all
help: Show all test results, not just failures.
multiple: true
global: true
- src:
value_name: SRC
short: s
long: src
help: The IP address, port, and MAC address of the source, in the format `iii.iii.iii.iii:ppppp,mm:mm:mm:mm:mm:mm`, where the `i`s are the IP address, the `p`s the port, and the `m`s the MAC address.
required: true
takes_value: true
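# An illustrative invocation of the resulting tester binary (the binary name
# is assumed from the crate directory; addresses, ports, and the serial
# device are placeholders):
#
#   ether_tester --bytes 1024 --reps 100 \
#     --serial-port /dev/ttyUSB0:115200 \
#     --src 192.168.1.2:7000,aa:bb:cc:dd:ee:01 \
#     --dest 192.168.1.1:7000,aa:bb:cc:dd:ee:02 \
#     --show-all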
|