Message ID | 20241210204349.723590-21-alex.bennee@linaro.org |
---|---|
State | New |
Series | testing/next: functional tests and qtest timers |
On 12/10/24 14:43, Alex Bennée wrote:
> Now that we have virtio-gpu Vulkan support, let's add a test for it.
> Currently this is using images built by buildroot:
>
> https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
>  tests/functional/test_aarch64_virt.py | 83 ++++++++++++++++++++++++++-
>  1 file changed, 80 insertions(+), 3 deletions(-)

Why is this not in a new file, so that it can run in parallel with the existing test?

r~
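For illustration, a minimal skeleton of the split Richard is asking about. The file name and class name here are hypothetical, not something posted in this series:

#!/usr/bin/env python3
# Hypothetical tests/functional/test_aarch64_virt_gpu.py: giving the
# GPU test its own file (and class) lets the functional test harness
# schedule it in parallel with test_aarch64_virt.py.

from qemu_test import QemuSystemTest


class Aarch64VirtGPUMachine(QemuSystemTest):

    def test_aarch64_virt_with_gpu(self):
        # body as posted in the patch below
        self.skipTest("skeleton only")


if __name__ == '__main__':
    QemuSystemTest.main()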
On 10/12/2024 21.43, Alex Bennée wrote:
> Now that we have virtio-gpu Vulkan support, let's add a test for it.
> Currently this is using images built by buildroot:
>
> https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
>  tests/functional/test_aarch64_virt.py | 83 ++++++++++++++++++++++++++-
>  1 file changed, 80 insertions(+), 3 deletions(-)
>
> diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
> index 801300607c..25d11e2626 100755
> --- a/tests/functional/test_aarch64_virt.py
> +++ b/tests/functional/test_aarch64_virt.py
> @@ -14,11 +14,12 @@
>  import os
>  import logging
>
> +from qemu.machine.machine import VMLaunchFailure
> +
>  from qemu_test import BUILD_DIR
>  from qemu_test import QemuSystemTest, Asset
> -from qemu_test import exec_command, wait_for_console_pattern
> -from qemu_test import get_qemu_img, run_cmd
> -
> +from qemu_test import exec_command, wait_for_console_pattern, exec_command_and_wait_for_pattern
> +from qemu_test import has_cmd, get_qemu_img, run_cmd
>
>  class Aarch64VirtMachine(QemuSystemTest):
>      KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
> @@ -125,5 +126,81 @@ def test_aarch64_virt_gicv2(self):
>          self.common_aarch64_virt("virt,gic-version=2")
>
>
> +    ASSET_VIRT_GPU_KERNEL = Asset(
> +        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
> +         'download?path=%2F&files='
> +         'Image'),
> +        '89e5099d26166204cc5ca4bb6d1a11b92c217e1f82ec67e3ba363d09157462f6')
> +
> +    ASSET_VIRT_GPU_ROOTFS = Asset(
> +        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
> +         'download?path=%2F&files='
> +         'rootfs.ext4.zstd'),
> +        '792da7573f5dc2913ddb7c638151d4a6b2d028a4cb2afb38add513c1924bdad4')
> +
> +    def test_aarch64_virt_with_gpu(self):
> +        # This test boots with a buildroot test image that contains
> +        # vkmark and other GPU exercising tools. We run a headless
> +        # weston that nevertheless still exercises the virtio-gpu
> +        # backend.
> +
> +        (has_zstd, msg) = has_cmd('zstd')
> +        if has_zstd is False:
> +            self.skipTest(msg)

I'd recommend using a decorator instead:

    @skipUnless(*has_cmd('zstd'))

or, if Daniel's patch series lands first, switch to @skipIfMissingCommands:

https://lore.kernel.org/qemu-devel/20241129173120.761728-7-berrange@redhat.com/

> +        self.zstd = 'zstd'

You only use self.zstd once, so the detour through a variable seems unnecessary here?

> +        image_path_zst = self.ASSET_VIRT_GPU_ROOTFS.fetch()
> +        kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch()
> +
> +        image_path = self.workdir + "/rootfs.ext4"
> +
> +        run_cmd([self.zstd, "-f", "-d", image_path_zst,
> +                 "-o", image_path])
> +
> +        self.set_machine('virt')

Please move set_machine to the top of the function. Reasoning: it can cancel the test if the 'virt' machine has not been compiled into the binary, and in that case you'd have extracted the rootfs image in vain.

> +        self.vm.set_console()
> +        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
> +                               'console=ttyAMA0 root=/dev/vda')
> +        self.require_accelerator("tcg")

Same, please move to the beginning since it can skip the test.
> +        self.vm.add_args("-accel", "tcg")
> +        self.vm.add_args("-cpu", "neoverse-v1,pauth-impdef=on")
> +        self.vm.add_args("-machine",
> +                         "virt,virtualization=on,"
> +                         "gic-version=max",
> +                         '-kernel', kernel_path,
> +                         '-append', kernel_command_line)
> +        self.vm.add_args("-smp", "2", "-m", "2048")
> +        self.vm.add_args("-device", "virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on")
> +        self.vm.add_args("-display", "egl-headless")
> +        self.vm.add_args("-display", "dbus,gl=on")
> +        self.vm.add_args("-device", "virtio-blk-device,drive=hd0")
> +        self.vm.add_args("-blockdev",
> +                         "driver=raw,file.driver=file,node-name=hd0,read-only=on,"
> +                         f"file.filename={image_path}")
> +        self.vm.add_args("--snapshot")

Any reason for using double dashes just here and not for the other commands?

> +        try:
> +            self.vm.launch()
> +        except VMLaunchFailure as e:
> +            if "old virglrenderer, blob resources unsupported" in e.output:
> +                self.skipTest("No blob support for virtio-gpu")
> +            elif "old virglrenderer, venus unsupported" in e.output:
> +                self.skipTest("No venus support for virtio-gpu")
> +            else:
> +                self.log.info(f"un-handled launch failure: {e.output}")

s/un-handled/unhandled/ ?

 Thomas
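For reference, a short sketch of the decorator approach Thomas suggests above. It assumes has_cmd() keeps returning a (found, message) tuple, as the open-coded check in the patch implies, and that it is importable at class-definition time:

from unittest import skipUnless

from qemu_test import QemuSystemTest, has_cmd


class Aarch64VirtMachine(QemuSystemTest):

    # Unpacking has_cmd() maps straight onto skipUnless(condition,
    # reason), and the check now runs before any assets are fetched
    # or decompressed.
    @skipUnless(*has_cmd('zstd'))
    def test_aarch64_virt_with_gpu(self):
        ...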
Thomas Huth <thuth@redhat.com> writes:

> On 10/12/2024 21.43, Alex Bennée wrote:
>> Now that we have virtio-gpu Vulkan support, let's add a test for it.
>> Currently this is using images built by buildroot:
>> https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html
>> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

<snip>

>> +        self.vm.set_console()
>> +        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
>> +                               'console=ttyAMA0 root=/dev/vda')
>> +        self.require_accelerator("tcg")
>
> Same, please move to the beginning since it can skip the test.
>
>> +        self.vm.add_args("-accel", "tcg")

Actually this could run under KVM if we have it for Aarch64. Can we represent that?

>> +        self.vm.add_args("-cpu", "neoverse-v1,pauth-impdef=on")

I guess in that case we'd use -cpu host as well.

>> +        self.vm.add_args("-machine",
>> +                         "virt,virtualization=on,"
>> +                         "gic-version=max",
>> +                         '-kernel', kernel_path,
>> +                         '-append', kernel_command_line)
>> +        self.vm.add_args("-smp", "2", "-m", "2048")
>> +        self.vm.add_args("-device", "virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on")
>> +        self.vm.add_args("-display", "egl-headless")
>> +        self.vm.add_args("-display", "dbus,gl=on")
>> +        self.vm.add_args("-device", "virtio-blk-device,drive=hd0")
>> +        self.vm.add_args("-blockdev",
>> +                         "driver=raw,file.driver=file,node-name=hd0,read-only=on,"
>> +                         f"file.filename={image_path}")
>> +        self.vm.add_args("--snapshot")
>
> Any reason for using double dashes just here and not for the other commands?
>
>> +        try:
>> +            self.vm.launch()
>> +        except VMLaunchFailure as e:
>> +            if "old virglrenderer, blob resources unsupported" in e.output:
>> +                self.skipTest("No blob support for virtio-gpu")
>> +            elif "old virglrenderer, venus unsupported" in e.output:
>> +                self.skipTest("No venus support for virtio-gpu")
>> +            else:
>> +                self.log.info(f"un-handled launch failure: {e.output}")
>
> s/un-handled/unhandled/ ?
>
> Thomas
On 12/12/2024 13.32, Alex Bennée wrote:
> Thomas Huth <thuth@redhat.com> writes:
>
>> On 10/12/2024 21.43, Alex Bennée wrote:
>>> Now that we have virtio-gpu Vulkan support, let's add a test for it.
>>> Currently this is using images built by buildroot:
>>> https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html
>>> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> <snip>
>>> +        self.vm.set_console()
>>> +        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
>>> +                               'console=ttyAMA0 root=/dev/vda')
>>> +        self.require_accelerator("tcg")
>>
>> Same, please move to the beginning since it can skip the test.
>>
>>> +        self.vm.add_args("-accel", "tcg")
>
> Actually this could run under KVM if we have it for Aarch64. Can we
> represent that?

Simply omit the "require" line and add an additional "-accel kvm" here? ... I think nobody has ever tried to run the functional tests with another accelerator besides KVM and TCG on aarch64 hosts, so it should be fine.

 Thomas
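A minimal sketch of that suggestion, as a method-body excerpt rather than a posted patch. It relies on QEMU trying the -accel options in order and using the first accelerator that initializes, and it deliberately leaves Alex's "-cpu host" point unaddressed:

        # Sketch (not part of the posted series): drop the
        # require_accelerator("tcg") call and list both accelerators;
        # QEMU falls back to TCG when KVM is unavailable.
        self.vm.add_args("-accel", "kvm")
        self.vm.add_args("-accel", "tcg")
        # Caveat from the thread: "neoverse-v1" is a TCG-only CPU
        # model, so an actual KVM run would instead need "-cpu host".
        self.vm.add_args("-cpu", "neoverse-v1,pauth-impdef=on")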
diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
index 801300607c..25d11e2626 100755
--- a/tests/functional/test_aarch64_virt.py
+++ b/tests/functional/test_aarch64_virt.py
@@ -14,11 +14,12 @@
 import os
 import logging
 
+from qemu.machine.machine import VMLaunchFailure
+
 from qemu_test import BUILD_DIR
 from qemu_test import QemuSystemTest, Asset
-from qemu_test import exec_command, wait_for_console_pattern
-from qemu_test import get_qemu_img, run_cmd
-
+from qemu_test import exec_command, wait_for_console_pattern, exec_command_and_wait_for_pattern
+from qemu_test import has_cmd, get_qemu_img, run_cmd
 
 class Aarch64VirtMachine(QemuSystemTest):
     KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
@@ -125,5 +126,81 @@ def test_aarch64_virt_gicv2(self):
         self.common_aarch64_virt("virt,gic-version=2")
 
 
+    ASSET_VIRT_GPU_KERNEL = Asset(
+        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+         'download?path=%2F&files='
+         'Image'),
+        '89e5099d26166204cc5ca4bb6d1a11b92c217e1f82ec67e3ba363d09157462f6')
+
+    ASSET_VIRT_GPU_ROOTFS = Asset(
+        ('https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+         'download?path=%2F&files='
+         'rootfs.ext4.zstd'),
+        '792da7573f5dc2913ddb7c638151d4a6b2d028a4cb2afb38add513c1924bdad4')
+
+    def test_aarch64_virt_with_gpu(self):
+        # This test boots with a buildroot test image that contains
+        # vkmark and other GPU exercising tools. We run a headless
+        # weston that nevertheless still exercises the virtio-gpu
+        # backend.
+
+        (has_zstd, msg) = has_cmd('zstd')
+        if has_zstd is False:
+            self.skipTest(msg)
+        self.zstd = 'zstd'
+
+        image_path_zst = self.ASSET_VIRT_GPU_ROOTFS.fetch()
+        kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch()
+
+        image_path = self.workdir + "/rootfs.ext4"
+
+        run_cmd([self.zstd, "-f", "-d", image_path_zst,
+                 "-o", image_path])
+
+        self.set_machine('virt')
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0 root=/dev/vda')
+        self.require_accelerator("tcg")
+
+        self.vm.add_args("-accel", "tcg")
+        self.vm.add_args("-cpu", "neoverse-v1,pauth-impdef=on")
+        self.vm.add_args("-machine",
+                         "virt,virtualization=on,"
+                         "gic-version=max",
+                         '-kernel', kernel_path,
+                         '-append', kernel_command_line)
+        self.vm.add_args("-smp", "2", "-m", "2048")
+        self.vm.add_args("-device", "virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on")
+        self.vm.add_args("-display", "egl-headless")
+        self.vm.add_args("-display", "dbus,gl=on")
+        self.vm.add_args("-device", "virtio-blk-device,drive=hd0")
+        self.vm.add_args("-blockdev",
+                         "driver=raw,file.driver=file,node-name=hd0,read-only=on,"
+                         f"file.filename={image_path}")
+        self.vm.add_args("--snapshot")
+
+        try:
+            self.vm.launch()
+        except VMLaunchFailure as e:
+            if "old virglrenderer, blob resources unsupported" in e.output:
+                self.skipTest("No blob support for virtio-gpu")
+            elif "old virglrenderer, venus unsupported" in e.output:
+                self.skipTest("No venus support for virtio-gpu")
+            else:
+                self.log.info(f"un-handled launch failure: {e.output}")
+                raise e
+
+        self.wait_for_console_pattern('buildroot login:')
+        exec_command(self, 'root')
+        exec_command(self, 'export XDG_RUNTIME_DIR=/tmp')
+        exec_command_and_wait_for_pattern(self,
+                                          "weston -B headless "
+                                          "--renderer gl "
+                                          "--shell kiosk "
+                                          "-- vkmark",
+                                          "vkmark Score")
+
+
 if __name__ == '__main__':
     QemuSystemTest.main()
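Since the file keeps the main() hook at the bottom, the test can also be run standalone from a configured build tree. The invocation below follows the functional-test documentation; the exact relative paths are an assumption about the build layout:

  cd build
  PYTHONPATH=../python:../tests/functional \
      QEMU_TEST_QEMU_BINARY=./qemu-system-aarch64 \
      python3 ../tests/functional/test_aarch64_virt.py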
Now that we have virtio-gpu Vulkan support, let's add a test for it.
Currently this is using images built by buildroot:

https://lists.buildroot.org/pipermail/buildroot/2024-December/768196.html

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 tests/functional/test_aarch64_virt.py | 83 ++++++++++++++++++++++++++-
 1 file changed, 80 insertions(+), 3 deletions(-)