Skip to content

Commit

Permalink
Merge pull request #72 from GaloisInc/63-shared-images
Browse files Browse the repository at this point in the history
Share build artifacts and disk images between VMs

This branch improves the handling of VM disk images:

* Images are created using QEMU's "backing file" feature, which allows sharing a common base image between the host and guest images.  This means we keep only one copy of a basic Debian installation, instead of two.
* Images are now transparently compressed.  Combined with the previous point, this reduces the combined size of the host and guest images from 7 GB to 600 MB.
* Software such as vm_runner, vhost-device-gpio, and our patched QEMU is built as .deb packages so it can be easily installed into the VM images.
* .deb packages and VM images are built and cached during CI.
  • Loading branch information
spernsteiner authored Jun 24, 2024
2 parents 8e0db30 + 3ef873a commit 7777988
Show file tree
Hide file tree
Showing 36 changed files with 2,244 additions and 166 deletions.
221 changes: 214 additions & 7 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,17 +64,224 @@ jobs:
pip3 install -r requirements.txt
RTS_DEBUG=1 QUICK=1 python3 ./run_all.py
# CI job: build the `vm_runner` .deb package, keyed by a content hash so the
# result can be cached across workflow runs.
# NOTE(review): this span is a rendered diff whose +/- markers were lost in
# scraping; removed lines (the old `vmrunner:` job name, the unconditional
# toolchain/rust steps, and `actions/checkout@master`) appear interleaved
# with their replacements — confirm against the actual commit before reuse.
vmrunner:
vm_runner:
runs-on: ubuntu-22.04
steps:
- name: Install aarch64 toolchain
- uses: actions/checkout@v4
# Compute a content-derived cache key via package.sh and export it both as
# a step output (consumed by downstream jobs) and as an env var (consumed
# by later steps in this job).
- name: Hash inputs
id: hash
run: |
cache_key="$(bash src/pkvm_setup/package.sh cache_key vm_runner)"
echo "Cache key: $cache_key"
echo "CACHE_KEY=$cache_key" >>$GITHUB_OUTPUT
echo "CACHE_KEY=$cache_key" >>$GITHUB_ENV
# Cache the built package tarball; on a cache hit every later build step
# in this job is skipped via its `if:` condition.
- name: Cache results
id: cache
uses: actions/cache@v3
with:
key: ${{ env.CACHE_KEY }}
path: packages/${{ env.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Install aarch64 toolchain
run: sudo apt-get install -y gcc-aarch64-linux-gnu
- uses: hecrj/setup-rust-action@v2
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
uses: hecrj/setup-rust-action@v2
with:
rust-version: 1.74
targets: aarch64-unknown-linux-gnu
- uses: actions/checkout@master
- name: Build VM runner
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Build VM runner
run: |
bash src/pkvm_setup/package.sh full_build vm_runner
# Expose the cache key so dependent jobs (vm_images) can restore this cache.
outputs:
CACHE_KEY: ${{ steps.hash.outputs.CACHE_KEY }}

# CI job: build the `vhost_device` .deb package (vhost-device + libgpiod
# submodules), cross-compiled for aarch64, cached by content hash.
# NOTE(review): indentation was stripped by the page scrape; nesting below
# must be reconstructed from GitHub Actions workflow structure.
vhost_device:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
# Rewrite the SSH submodule URL to token-authenticated HTTPS so the runner
# can fetch the private repos without an SSH key.
- name: Checkout submodules
run: |
git config --global url."https://podhrmic:${{ secrets.VERSE_VHOST_DEVICE_ACCESS_TOKEN }}@github.com/".insteadOf "[email protected]:"
git submodule update --init src/pkvm_setup/libgpiod
git submodule update --init src/pkvm_setup/vhost-device
# Compute the cache key and export it as a step output and an env var.
- name: Hash inputs
id: hash
run: |
cache_key="$(bash src/pkvm_setup/package.sh cache_key vhost_device)"
echo "Cache key: $cache_key"
echo "CACHE_KEY=$cache_key" >>$GITHUB_OUTPUT
echo "CACHE_KEY=$cache_key" >>$GITHUB_ENV
# Cache the built package tarball; later steps skip on a hit.
- name: Cache results
id: cache
uses: actions/cache@v3
with:
key: ${{ env.CACHE_KEY }}
path: packages/${{ env.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Install dependency packages
run: |
sudo apt-get install -y \
build-essential autoconf automake autoconf-archive \
gcc-aarch64-linux-gnu
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
uses: hecrj/setup-rust-action@v2
with:
rust-version: 1.74
targets: aarch64-unknown-linux-gnu
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Build vhost-device
run: |
# This must match the `rust-version` installed above.
export RUSTUP_TOOLCHAIN=1.74
bash src/pkvm_setup/package.sh full_build vhost_device
# Expose the cache key so vm_images can restore this cache.
outputs:
CACHE_KEY: ${{ steps.hash.outputs.CACHE_KEY }}

# CI job: obtain the prebuilt pKVM kernel package. Unlike vm_runner /
# vhost_device, this job downloads from Artifactory rather than building,
# and caches the downloaded tarball by content hash.
pkvm:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Checkout submodules
run: |
git submodule update --init src/pkvm_setup/linux-pkvm
# Compute the cache key and export it as a step output and an env var.
- name: Hash inputs
id: hash
run: |
cache_key="$(bash src/pkvm_setup/package.sh cache_key pkvm)"
echo "Cache key: $cache_key"
echo "CACHE_KEY=$cache_key" >>$GITHUB_OUTPUT
echo "CACHE_KEY=$cache_key" >>$GITHUB_ENV
- name: Cache results
id: cache
uses: actions/cache@v3
with:
key: ${{ env.CACHE_KEY }}
path: packages/${{ env.CACHE_KEY }}.tar.gz
# Only hit Artifactory on a cache miss; credentials come from repo secrets.
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Fetch from Artifactory
run: |
bash src/pkvm_setup/package.sh download pkvm \
-u "${{ secrets.ARTIFACTORY_RDE_GENERIC_USERNAME }}:${{ secrets.ARTIFACTORY_RDE_GENERIC_ACCESS_TOKEN }}"
# Expose the cache key so vm_images can restore this cache.
outputs:
CACHE_KEY: ${{ steps.hash.outputs.CACHE_KEY }}

# CI job: obtain the patched QEMU package from Artifactory (download-only,
# cached by content hash). Same shape as the `pkvm` job above it.
qemu:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
# Compute the cache key and export it as a step output and an env var.
- name: Hash inputs
id: hash
run: |
cache_key="$(bash src/pkvm_setup/package.sh cache_key qemu)"
echo "Cache key: $cache_key"
echo "CACHE_KEY=$cache_key" >>$GITHUB_OUTPUT
echo "CACHE_KEY=$cache_key" >>$GITHUB_ENV
- name: Cache results
id: cache
uses: actions/cache@v3
with:
key: ${{ env.CACHE_KEY }}
path: packages/${{ env.CACHE_KEY }}.tar.gz
# Only hit Artifactory on a cache miss.
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Fetch from Artifactory
run: |
bash src/pkvm_setup/package.sh download qemu \
-u "${{ secrets.ARTIFACTORY_RDE_GENERIC_USERNAME }}:${{ secrets.ARTIFACTORY_RDE_GENERIC_ACCESS_TOKEN }}"
# Expose the cache key so vm_images can restore this cache.
outputs:
CACHE_KEY: ${{ steps.hash.outputs.CACHE_KEY }}

# CI job: obtain the shared base disk image (QEMU backing file common to the
# host and guest images) from Artifactory, cached by content hash.
vm_image_base:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
# Compute the cache key and export it as a step output and an env var.
- name: Hash inputs
id: hash
run: |
cache_key="$(bash src/pkvm_setup/package.sh cache_key vm_image_base)"
echo "Cache key: $cache_key"
echo "CACHE_KEY=$cache_key" >>$GITHUB_OUTPUT
echo "CACHE_KEY=$cache_key" >>$GITHUB_ENV
- name: Cache results
id: cache
uses: actions/cache@v3
with:
key: ${{ env.CACHE_KEY }}
path: packages/${{ env.CACHE_KEY }}.tar.gz
# Only hit Artifactory on a cache miss.
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Fetch from Artifactory
run: |
bash src/pkvm_setup/package.sh download vm_image_base \
-u "${{ secrets.ARTIFACTORY_RDE_GENERIC_USERNAME }}:${{ secrets.ARTIFACTORY_RDE_GENERIC_ACCESS_TOKEN }}"
# Expose the cache key so vm_images can restore this cache.
outputs:
CACHE_KEY: ${{ steps.hash.outputs.CACHE_KEY }}

# CI job: assemble the final host/guest VM disk images. Depends on all five
# package jobs above and, on a cache miss, restores each of their cached
# tarballs (keyed by the CACHE_KEY each job exported) before building.
vm_images:
runs-on: ubuntu-22.04
needs:
- vm_runner
- vhost_device
- pkvm
- qemu
- vm_image_base
steps:
- uses: actions/checkout@v4
# Token-authenticated HTTPS rewrite so private submodules can be fetched.
- name: Checkout submodules
run: |
git config --global url."https://podhrmic:${{ secrets.VERSE_VHOST_DEVICE_ACCESS_TOKEN }}@github.com/".insteadOf "[email protected]:"
git submodule update --init src/pkvm_setup/libgpiod
git submodule update --init src/pkvm_setup/vhost-device
git submodule update --init src/pkvm_setup/linux-pkvm
# Compute this job's own cache key for the finished images.
- name: Hash inputs
id: hash
run: |
cache_key="$(bash src/pkvm_setup/package.sh cache_key vm_images)"
echo "Cache key: $cache_key"
echo "CACHE_KEY=$cache_key" >>$GITHUB_OUTPUT
echo "CACHE_KEY=$cache_key" >>$GITHUB_ENV
- name: Cache results
id: cache
uses: actions/cache@v3
with:
key: ${{ env.CACHE_KEY }}
path: packages/${{ env.CACHE_KEY }}.tar.gz
# Restore-only (`actions/cache/restore`) pulls each dependency's tarball
# without re-saving it; all restores are skipped when the final-image
# cache already hit.
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: "Cache restore: vm_runner"
uses: actions/cache/restore@v3
with:
key: ${{ needs.vm_runner.outputs.CACHE_KEY }}
path: packages/${{ needs.vm_runner.outputs.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: "Cache restore: vhost_device"
uses: actions/cache/restore@v3
with:
key: ${{ needs.vhost_device.outputs.CACHE_KEY }}
path: packages/${{ needs.vhost_device.outputs.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: "Cache restore: pkvm"
uses: actions/cache/restore@v3
with:
key: ${{ needs.pkvm.outputs.CACHE_KEY }}
path: packages/${{ needs.pkvm.outputs.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: "Cache restore: qemu"
uses: actions/cache/restore@v3
with:
key: ${{ needs.qemu.outputs.CACHE_KEY }}
path: packages/${{ needs.qemu.outputs.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: "Cache restore: vm_image_base"
uses: actions/cache/restore@v3
with:
key: ${{ needs.vm_image_base.outputs.CACHE_KEY }}
path: packages/${{ needs.vm_image_base.outputs.CACHE_KEY }}.tar.gz
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Install dependency packages
run: |
sudo apt-get install -y qemu-system-arm qemu-utils
# NOTE(review): `cd src/vm_runner` persists for the rest of this run
# block, so the following `src/pkvm_setup/package.sh` path resolves
# relative to src/vm_runner — verify this is intended. Also, no Rust
# toolchain / aarch64 target is installed in this job before the cross
# `cargo build` — confirm the runner default toolchain suffices.
- if: ${{ steps.cache.outputs.cache-hit != 'true' }}
name: Build VM images
run: |
cd src/vm_runner
cargo build --release --target aarch64-unknown-linux-gnu
bash src/pkvm_setup/package.sh full_build vm_images
outputs:
CACHE_KEY: ${{ steps.hash.outputs.CACHE_KEY }}
5 changes: 5 additions & 0 deletions src/pkvm_setup/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
*.deb
*.buildinfo
*.changes
/qemu_build/
/vms/
47 changes: 17 additions & 30 deletions src/pkvm_setup/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -49,26 +49,19 @@ sudo apt install debian-installer-12-netboot-arm64
sudo apt build-dep linux
```

Now build the host and guest VMs:
Now build or fetch dependencies and build the VM images:

```sh
# Build the host and guest disk images. This takes 1-2 hours.
bash create_disk_images.sh

# Build our patched version of QEMU in the host VM. This takes 1-2 hours.
bash run_vm_script.sh vms/disk_host.img vm_scripts/install_qemu.sh

# Run the host VM.
bash run_vm.sh vms/disk_host.img
# Log in as `user`/`password`, or use `ssh -o Port=8022 user@localhost`.
bash package.sh full_build vm_runner
bash package.sh full_build vhost_device
# Use `full_build` instead of `download` to build locally
# (this may take several hours)
bash package.sh download pkvm
bash package.sh download qemu
bash package.sh download vm_image_base
bash package.sh full_build vm_images
```

Note: while the Debian installer is running, resizing the terminal may cause
the installer's display to be corrupted. If this happens, press `^A ^A ^L` to
redraw it. (`^A` is the escape character for QEMU's terminal multiplexer; `^A
^A` sends `^A` to the VM; and `^A ^L` in the VM causes the `screen` instance
that `debian-installer` sets up to redraw its display.)


# Running guests

Expand All @@ -80,14 +73,14 @@ To run a Linux guest:

```sh
# Base Platform:
bash copy_file.sh vms/disk_host.img vm_scripts/run_guest_qemu.sh
bash copy_file.sh vms/disk_host_dev.img vm_scripts/run_guest_qemu.sh
```

* Start the host VM with the guest disk attached:

```sh
# Base Platform:
bash run_vm_nested.sh vms/disk_host.img vms/disk_guest.img
bash run_vm_nested.sh vms/disk_host_dev.img vms/disk_guest_dev.img
```

* Log in to the host VM on the QEMU console or via SSH, as described above.
Expand Down Expand Up @@ -123,21 +116,21 @@ To run the Hello World guest:

```sh
# Outside:
bash run_vm_script.sh vms/disk_host.img vm_scripts/build_hello_world.sh
bash run_vm_script.sh vms/disk_host_dev.img vm_scripts/build_hello_world.sh
```

* Copy the Hello World guest script into the host VM:

```sh
# Outside:
bash copy_file.sh vms/disk_host.img vm_scripts/run_hello_qemu.sh
bash copy_file.sh vms/disk_host_dev.img vm_scripts/run_hello_qemu.sh
```

* Start the host VM:

```sh
# Outside:
bash run_vm.sh vms/disk_host.img
bash run_vm.sh vms/disk_host_dev.img
```

The Hello World guest doesn't use the guest disk image, so there's no need to
Expand Down Expand Up @@ -168,15 +161,9 @@ To run the Hello World guest:

# Using pKVM

First, build the pKVM kernel:

```sh
# Outside:
bash build_pkvm.sh
```

Then, boot the host VM and run guests as above, using `run_vm_nested_pkvm.sh`
in place of `run_vm_nested.sh`.
The VM images are built using the pKVM kernel, but don't enable pKVM mode by
default. To enable it, boot the host VM and run guests as above using
`run_vm_nested_pkvm.sh` in place of `run_vm_nested.sh`.

To check that pKVM is working, check the kernel messages in the host VM:

Expand Down
38 changes: 37 additions & 1 deletion src/pkvm_setup/build_libgpiod.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,43 @@
set -euo pipefail

# Dependencies: build-essential, autoconf, automake, autoconf-archive
#
# Additional dependencies for aarch64 cross-builds: gcc-aarch64-linux-gnu

# Optional first argument selects a cross-compilation target (currently only
# "aarch64" is recognized); with no argument, a native build is performed.
target=
if [[ "$#" -ne 0 ]]; then
target="$1"
fi

# Use a per-target build directory (e.g. build.aarch64) so native and cross
# artifacts don't clobber each other.
build_dir=build
if [[ -n "$target" ]]; then
build_dir="$build_dir.$target"
fi

configure_args=()
case "$target" in
aarch64)
configure_args+=(
--host aarch64-unknown-linux-gnu
# We use `--disable-shared` here so that `vhost-device` will be
# forced to statically link libgpiod. This means one less
# dependency to worry about when installing into the VM.
#
# (Also, cross-compiling with `--enable-shared` doesn't seem to
# actually produce a shared version of the library, only a few
# broken symlinks.)
--disable-shared
CC=aarch64-linux-gnu-gcc
LD=aarch64-linux-gnu-gcc
)
;;
esac

cd libgpiod
# NOTE(review): this span is a rendered diff with +/- markers lost; the bare
# `./autogen.sh` on the next line appears to be the removed pre-diff step,
# superseded by the out-of-tree `../autogen.sh` invocation below — confirm
# against the actual commit.
./autogen.sh
mkdir -p "$build_dir"
cd "$build_dir"
# For some reason, doing ./autogen.sh and ./configure as separate steps in an
# out-of-tree build causes ./configure to complain that the source tree is
# already configured.
../autogen.sh "${configure_args[@]}"
make -j "$(nproc)"
6 changes: 4 additions & 2 deletions src/pkvm_setup/build_pkvm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,13 @@ mkdir -p vms/pkvm-boot
# Build the pKVM-enabled arm64 Linux kernel with clang, cross-compiled via
# the aarch64-linux-gnu toolchain, starting from defconfig.
cd linux-pkvm
make ARCH=arm64 CC=clang CROSS_COMPILE=aarch64-linux-gnu- -j "$(nproc)" defconfig

# Include `pkvm` in the version string.
./scripts/config --set-str CONFIG_LOCALVERSION '-pkvm'

# Enable virtio GPIO and I2C drivers. We pass these through from outside to
# the host VM and then through to some of the guests, so the host needs the
# drivers.
./scripts/config -e CONFIG_GPIO_VIRTIO
./scripts/config -e CONFIG_I2C_VIRTIO

# NOTE(review): rendered diff with +/- markers lost — the `Image` build and
# `cp` lines below appear to be the removed pre-diff steps, replaced by the
# `bindeb-pkg` target (which emits installable .deb kernel packages instead
# of a raw Image copied into vms/pkvm-boot). Confirm against the commit.
make ARCH=arm64 CC=clang CROSS_COMPILE=aarch64-linux-gnu- -j "$(nproc)" Image
cp -v arch/arm64/boot/Image ../vms/pkvm-boot/vmlinuz-pkvm
make ARCH=arm64 CC=clang CROSS_COMPILE=aarch64-linux-gnu- -j "$(nproc)" bindeb-pkg
Loading

0 comments on commit 7777988

Please sign in to comment.