diff mbox series

[v2,20/27] tests/functional: extend test_aarch64_virt with vulkan test

Message ID 20241218162104.3493551-21-alex.bennee@linaro.org
State New
Headers show
Series testing/next: functional tests, qtest clocks, vm and keymaps | expand

Commit Message

Alex Bennée Dec. 18, 2024, 4:20 p.m. UTC
Now we have virtio-gpu Vulkan support lets add a test for it.
Currently this is using images built by buildroot:

  https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

---
v2
  - use decorator for has_cmd(zstd)
  - move set_machine/require_accelerator to top of test
  - un-handled->unhandled
  - drop extra - from --snapshot
  - drop unneeded virtualization=on
  - only show 1s of each scene
  - fix long lines
---
 tests/functional/test_aarch64_virt.py | 84 ++++++++++++++++++++++++++-
 1 file changed, 81 insertions(+), 3 deletions(-)

Comments

Thomas Huth Dec. 18, 2024, 4:37 p.m. UTC | #1
On 18/12/2024 17.20, Alex Bennée wrote:
> Now we have virtio-gpu Vulkan support lets add a test for it.

s/lets/let's/ ?

...
> diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
> index 453e84c39f..4ac66905b8 100755
> --- a/tests/functional/test_aarch64_virt.py
> +++ b/tests/functional/test_aarch64_virt.py
> @@ -13,11 +13,14 @@
>   import os
>   import logging
>   
> +from qemu.machine.machine import VMLaunchFailure
> +
>   from qemu_test import BUILD_DIR
>   from qemu_test import QemuSystemTest, Asset
>   from qemu_test import exec_command, wait_for_console_pattern
> -from qemu_test import get_qemu_img, run_cmd
> -
> +from qemu_test import exec_command_and_wait_for_pattern
> +from qemu_test import has_cmd, get_qemu_img, run_cmd
> +from unittest import skipUnless
>   
>   class Aarch64VirtMachine(QemuSystemTest):
>       KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
> @@ -101,7 +104,9 @@ def common_aarch64_virt(self, machine):
>   
>           # Add the device
>           self.vm.add_args('-blockdev',
> -                         f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch")
> +                         "driver=qcow2,file."
> +                         "driver=file,file."
> +                         f"filename={image_path},node-name=scratch")
>           self.vm.add_args('-device',
>                            'virtio-blk-device,drive=scratch')
>   
> @@ -130,5 +135,78 @@ def test_aarch64_virt_gicv2(self):
>           self.common_aarch64_virt("virt,gic-version=2")
>   
>   
> +    ASSET_VIRT_GPU_KERNEL = Asset(
> +        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
> +         'download?path=%2F&files='
> +         'Image'),
> +        '89e5099d26166204cc5ca4bb6d1a11b92c217e1f82ec67e3ba363d09157462f6')
> +
> +    ASSET_VIRT_GPU_ROOTFS = Asset(
> +        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
> +         'download?path=%2F&files='
> +         'rootfs.ext4.zstd'),
> +        '792da7573f5dc2913ddb7c638151d4a6b2d028a4cb2afb38add513c1924bdad4')
> +
> +    @skipUnless(*has_cmd('zstd'))

Please switch to the new @skipIfMissingCommands() decorator that will be 
provided by Daniel's patches in my pull request from today (it also removes 
has_cmd() so you need to respin this patch as soon as my PR lands).

  Thanks,
   Thomas
Daniel P. Berrangé Dec. 18, 2024, 4:39 p.m. UTC | #2
On Wed, Dec 18, 2024 at 04:20:56PM +0000, Alex Bennée wrote:
> Now we have virtio-gpu Vulkan support lets add a test for it.
> Currently this is using images built by buildroot:
> 
>   https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html
> 
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> 
> ---
> v2
>   - use decorator for has_cmd(zstd)
>   - move set_machine/require_accelerator to top of test
>   - un-handled->unhandled
>   - drop extra - from --snapshot
>   - drop unneeded virtualization=on
>   - only show 1s of each scene
>   - fix long lines
> ---
>  tests/functional/test_aarch64_virt.py | 84 ++++++++++++++++++++++++++-
>  1 file changed, 81 insertions(+), 3 deletions(-)
> 

> +    @skipUnless(*has_cmd('zstd'))
> +    def test_aarch64_virt_with_gpu(self):
> +        # This test boots with a buildroot test image that contains
> +        # vkmark and other GPU exercising tools. We run a headless
> +        # weston that nevertheless still exercises the virtio-gpu
> +        # backend.
> +
> +        self.set_machine('virt')
> +        self.require_accelerator("tcg")
> +
> +        image_path_zst = self.ASSET_VIRT_GPU_ROOTFS.fetch()
> +        kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch()
> +
> +        image_path = self.workdir + "/rootfs.ext4"

Pending PULL has removed all direct access to self.workdir,
use  'self.scratch_file("rootfs.ext4")' instead, except that
shouldn't be needed either....

> +
> +        run_cmd(['zstd', "-f", "-d", image_path_zst,
> +                 "-o", image_path])

...the pending PULL has added common helpers for uncompression,
but only covers gz + lzma so far. Can you create a 'zstd_uncompress'
in uncompress.py and wire it up in the same way. That would let
this code turn into

   image_path = self.uncompress(self.ASSET_VIRT_GPU_ROOTFS)


With regards,
Daniel
diff mbox series

Patch

diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
index 453e84c39f..4ac66905b8 100755
--- a/tests/functional/test_aarch64_virt.py
+++ b/tests/functional/test_aarch64_virt.py
@@ -13,11 +13,14 @@ 
 import os
 import logging
 
+from qemu.machine.machine import VMLaunchFailure
+
 from qemu_test import BUILD_DIR
 from qemu_test import QemuSystemTest, Asset
 from qemu_test import exec_command, wait_for_console_pattern
-from qemu_test import get_qemu_img, run_cmd
-
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import has_cmd, get_qemu_img, run_cmd
+from unittest import skipUnless
 
 class Aarch64VirtMachine(QemuSystemTest):
     KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
@@ -101,7 +104,9 @@  def common_aarch64_virt(self, machine):
 
         # Add the device
         self.vm.add_args('-blockdev',
-                         f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch")
+                         "driver=qcow2,file."
+                         "driver=file,file."
+                         f"filename={image_path},node-name=scratch")
         self.vm.add_args('-device',
                          'virtio-blk-device,drive=scratch')
 
@@ -130,5 +135,78 @@  def test_aarch64_virt_gicv2(self):
         self.common_aarch64_virt("virt,gic-version=2")
 
 
+    ASSET_VIRT_GPU_KERNEL = Asset(
+        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+         'download?path=%2F&files='
+         'Image'),
+        '89e5099d26166204cc5ca4bb6d1a11b92c217e1f82ec67e3ba363d09157462f6')
+
+    ASSET_VIRT_GPU_ROOTFS = Asset(
+        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+         'download?path=%2F&files='
+         'rootfs.ext4.zstd'),
+        '792da7573f5dc2913ddb7c638151d4a6b2d028a4cb2afb38add513c1924bdad4')
+
+    @skipUnless(*has_cmd('zstd'))
+    def test_aarch64_virt_with_gpu(self):
+        # This test boots with a buildroot test image that contains
+        # vkmark and other GPU exercising tools. We run a headless
+        # weston that nevertheless still exercises the virtio-gpu
+        # backend.
+
+        self.set_machine('virt')
+        self.require_accelerator("tcg")
+
+        image_path_zst = self.ASSET_VIRT_GPU_ROOTFS.fetch()
+        kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch()
+
+        image_path = self.workdir + "/rootfs.ext4"
+
+        run_cmd(['zstd', "-f", "-d", image_path_zst,
+                 "-o", image_path])
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0 root=/dev/vda')
+
+        self.vm.add_args("-accel", "tcg")
+        self.vm.add_args("-cpu", "neoverse-v1,pauth-impdef=on")
+        self.vm.add_args("-machine", "virt,gic-version=max",
+                         '-kernel', kernel_path,
+                         '-append', kernel_command_line)
+        self.vm.add_args("-smp", "2", "-m", "2048")
+        self.vm.add_args("-device",
+                         "virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on")
+        self.vm.add_args("-display", "egl-headless")
+        self.vm.add_args("-display", "dbus,gl=on")
+        self.vm.add_args("-device", "virtio-blk-device,drive=hd0")
+        self.vm.add_args("-blockdev",
+                         "driver=raw,file.driver=file,"
+                         "node-name=hd0,read-only=on,"
+                         f"file.filename={image_path}")
+        self.vm.add_args("-snapshot")
+
+        try:
+            self.vm.launch()
+        except VMLaunchFailure as excp:
+            if "old virglrenderer, blob resources unsupported" in excp.output:
+                self.skipTest("No blob support for virtio-gpu")
+            elif "old virglrenderer, venus unsupported" in excp.output:
+                self.skipTest("No venus support for virtio-gpu")
+            else:
+                self.log.info("unhandled launch failure: %s", excp.output)
+                raise excp
+
+        self.wait_for_console_pattern('buildroot login:')
+        exec_command(self, 'root')
+        exec_command(self, 'export XDG_RUNTIME_DIR=/tmp')
+        exec_command_and_wait_for_pattern(self,
+                                          "weston -B headless "
+                                          "--renderer gl "
+                                          "--shell kiosk "
+                                          "-- vkmark -b:duration=1.0",
+                                          "vkmark Score")
+
+
 if __name__ == '__main__':
     QemuSystemTest.main()