Remove redundant code; switch to OperationEquivalence::isEquivalentTo #6961
name: Build and Test with AIE tools on Ryzen AI
on:
  push:
    branches:
      - main
      - test-ryzen-ai
  pull_request:
  merge_group:
  # Allows you to run this workflow manually from the Actions tab by
  # selecting the workflow, picking the right branch in the "Run workflow"
  # menu, and enabling "launch_tmate_terminal_for_debug".
  # Unfortunately this works only for the default branch, so you can either:
  # - change the default branch of the repository owning the PR and launch
  #   from the Actions tab; or
  # - edit the tmate step below directly and push to the PR, ignoring the
  #   manual workflow launch.
  workflow_dispatch:
    inputs:
      launch_tmate_terminal_for_debug:
        type: boolean
        description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
        required: false
        default: false
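  # A sketch of triggering the manual dispatch from the gh CLI (assuming this
  # workflow file is named buildAndTestRyzenAI.yml; adjust to the real name):
  #   gh workflow run buildAndTestRyzenAI.yml --ref main \
  #     -f launch_tmate_terminal_for_debug=true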
defaults:
  run:
    shell: bash
concurrency:
  # A PR number if a pull request and otherwise the commit hash. This cancels
  # queued and in-progress runs for the same PR (presubmit) or commit
  # (postsubmit).
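  # For example, this PR (#6961) would map to the group
  # "ci-build-test-ryzenai-6961-pull_request".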
  group: ci-build-test-ryzenai-${{ github.event.number || github.sha }}-${{ github.event_name }}
  cancel-in-progress: true
env:
  DEBIAN_FRONTEND: noninteractive
  XILINXD_LICENSE_FILE: /opt/xilinx/Xilinx.lic
  VITIS: /opt/ryzen_ai-1.3.0/vitis_aie_essentials
  CMAKE_ARGS: |
    -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
    -DCMAKE_C_COMPILER_LAUNCHER=ccache \
    -DCMAKE_EXE_LINKER_FLAGS_INIT="-fuse-ld=lld" \
    -DCMAKE_MODULE_LINKER_FLAGS_INIT="-fuse-ld=lld" \
    -DCMAKE_SHARED_LINKER_FLAGS_INIT="-fuse-ld=lld" \
    -DXRT_ROOT=/opt/xilinx/xrt \
    -DAIE_VITIS_COMPONENTS=AIE2;AIE2P \
    -DAIE_ENABLE_PYTHON_PASSES=OFF \
    -DAIE_ENABLE_XRT_PYTHON_BINDINGS=ON \
    -DAIE_INCLUDE_INTEGRATION_TESTS=OFF
  LIT_OPTS: -sv --time-tests --timeout 600 --show-unsupported --show-excluded
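  # LIT_OPTS is read by llvm-lit: -sv keeps progress output terse while still
  # printing output from failing tests, --timeout caps each test at 600 s, and
  # the jobs below prepend a per-runner -j value to bound parallelism.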
jobs:
  build-tests:
    name: Run Tests on Ryzen AI
    runs-on: ${{ matrix.runner_type }}
    strategy:
      fail-fast: false
      matrix:
        runner_type: [ amd7940hs, amdhx370 ]
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: "true"
      # Launch an ssh session via a proxy server if there is a need for
      # debugging. This seems to live for 35 min max.
      # https://github.com/mxschmitt/action-tmate
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        # To run this, launch it manually on the default branch and
        # enable "launch_tmate_terminal_for_debug".
        if: github.event_name == 'workflow_dispatch'
          && inputs.launch_tmate_terminal_for_debug
      - name: Run commands
        run: |
          pip cache purge
          source /opt/xilinx/xrt/setup.sh
          python -m venv aie-venv
          source aie-venv/bin/activate
          pip install -r python/requirements.txt
          HOST_MLIR_PYTHON_PACKAGE_PREFIX=aie pip install -r python/requirements_extras.txt
          pip install -r python/requirements_ml.txt
          pip install jupyter
          sed -i.bak 's/OUTPUT_TIMEOUT = 10/OUTPUT_TIMEOUT = 100/g' \
            $(python -c 'import site; print(site.getsitepackages()[0])')/jupyter_client/runapp.py
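          # (The sed above patches jupyter_client's OUTPUT_TIMEOUT constant,
          # presumably so long-running notebook cells are not killed after
          # 10 s; this edits an internal constant, not a public API.)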
          VERSION=$(utils/clone-llvm.sh --get-wheel-version)
          pip -q download mlir==$VERSION \
            -f https://github.com/Xilinx/mlir-aie/releases/expanded_assets/mlir-distro
          unzip -q mlir-*.whl
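          # The wheel unpacks into ./mlir, the prebuilt MLIR distribution
          # whose cmake config is consumed via -DMLIR_DIR below.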
          # I have no clue why, but the system clock on GHA containers is
          # about 12 hours ahead. That means wheels have files with timestamps
          # in the future, which makes ninja loop forever when configuring.
          # Set the time to some arbitrary stamp in the past just to be safe.
          find mlir -exec touch -a -m -t 201108231405.14 {} \;
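          # (touch -t takes [[CC]YY]MMDDhhmm[.ss], so 201108231405.14 means
          # 2011-08-23 14:05:14.)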
          mkdir build
          pushd build

          # -j here to bound the number of parallel chess jobs:
          # -j6 for 32 GB RAM, -j12 for 64 GB RAM.
          if [ x"${{ matrix.runner_type }}" == x"amdhx370" ]; then
            LIT_OPTS="-j6 $LIT_OPTS"
          else
            LIT_OPTS="-j12 $LIT_OPTS"
          fi
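          # LIT_OPTS is already exported via the workflow-level env block, so
          # this reassignment stays visible to lit when ninja invokes it.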
          export PATH=$VITIS/bin:$VITIS/aietools/bin:$PATH
          cmake .. -G Ninja \
            -DPython3_EXECUTABLE=$(which python) \
            -DLLVM_EXTERNAL_LIT=$(which lit) \
            -DCMAKE_INSTALL_PREFIX=$PWD/../mlir_aie \
            -DCMAKE_MODULE_PATH=$PWD/../cmake/modulesXilinx \
            -DMLIR_DIR=$PWD/../mlir/lib/cmake/mlir \
            $CMAKE_ARGS
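          # "ninja install" stages the toolchain into ../mlir_aie;
          # "check-aie" then runs the lit test suites (honoring $LIT_OPTS).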
          ninja install
          ninja check-aie
          popd
  build-quick-setup:
    name: Run Examples on Ryzen AI
    runs-on: ${{ matrix.runner_type }}
    strategy:
      fail-fast: false
      matrix:
        runner_type: [ amd7940hs ]
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: "true"
      # Launch an ssh session via a proxy server if there is a need for
      # debugging. This seems to live for 35 min max.
      # https://github.com/mxschmitt/action-tmate
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        # To run this, launch it manually on the default branch and
        # enable "launch_tmate_terminal_for_debug".
        if: github.event_name == 'workflow_dispatch'
          && inputs.launch_tmate_terminal_for_debug
      - name: Run commands
        run: |
          pip cache purge
          source /opt/xilinx/xrt/setup.sh
          export PATH=$VITIS/bin:$VITIS/aietools/bin:$PATH
          source utils/quick_setup.sh
          # quick_setup.sh changes directory to programming_examples, so we
          # need to return to the mlir-aie checkout.
          cd ..
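          # quick_setup.sh populates ./my_install with the MLIR and llvm-aie
          # (Peano) wheels that the build script below consumes.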
          # I have no clue why, but the system clock on GHA containers is
          # about 12 hours ahead. That means wheels have files with timestamps
          # in the future, which makes ninja loop forever when configuring.
          # Set the time to some arbitrary stamp in the past just to be safe.
          find my_install/mlir -exec touch -a -m -t 201108231405.14 {} \;
          ./utils/build-mlir-aie-from-wheels.sh ./my_install/mlir build install ./my_install/llvm-aie
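          # Positional arguments (as read from this invocation): the unpacked
          # MLIR distro, the build dir name, the install dir name, and the
          # llvm-aie (Peano) toolchain.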
          # The build directory is created by the
          # build-mlir-aie-from-wheels.sh script.
          pushd build

          # -j here to bound the number of parallel chess jobs:
          # -j6 for 32 GB RAM, -j12 for 64 GB RAM.
          if [ x"${{ matrix.runner_type }}" == x"amdhx370" ]; then
            LIT_OPTS="-j6 $LIT_OPTS"
          else
            LIT_OPTS="-j12 $LIT_OPTS"
          fi
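          # check-reference-designs and check-programming-guide run the lit
          # suites for the example designs and the programming guide,
          # respectively (both honor $LIT_OPTS).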
          ninja install
          ninja check-reference-designs
          ninja check-programming-guide
          popd