--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1241,11 +1241,25 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		case GNTST_okay:
 		{
 			struct xen_page_foreign *foreign;
+			int page_cnt;
 
 			SetPageForeign(pages[i]);
 			foreign = xen_page_foreign(pages[i]);
 			foreign->domid = map_ops[i].dom;
 			foreign->gref = map_ops[i].ref;
+			page_cnt = page_count(pages[i]);
+			if (page_cnt > FOREIGN_MAX_PAGE_COUNT) {
+				/*
+				 * The foreign structure can't hold more than
+				 * FOREIGN_MAX_PAGE_COUNT, so save page_count = 1:
+				 * unmapping is then deferred until all other users
+				 * are gone and the caller handles them.
+				 */
+				pr_warn_ratelimited("page has too many users, deferring unmap until they are gone\n");
+				foreign->private = 1;
+			} else {
+				foreign->private = page_cnt;
+			}
 			break;
 		}
 
@@ -1308,9 +1322,11 @@ static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
 {
 	int ret;
 	int pc;
+	struct xen_page_foreign *foreign;
 
 	for (pc = 0; pc < item->count; pc++) {
-		if (page_count(item->pages[pc]) > 1) {
+		foreign = xen_page_foreign(item->pages[pc]);
+		if (page_count(item->pages[pc]) > foreign->private) {
 			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
 			schedule_delayed_work(&item->gnttab_work,
 					msecs_to_jiffies(delay));
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -49,6 +49,7 @@
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
 #include <linux/kernel.h>
+#include <linux/limits.h>
 
 /*
  * Technically there's no reliably invalid grant reference or grant handle,
@@ -274,9 +275,11 @@ int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
 
+#define FOREIGN_MAX_PAGE_COUNT U16_MAX
 struct xen_page_foreign {
 	domid_t domid;
+	uint16_t private;
 	grant_ref_t gref;
 };
 
Save the reference count of the page at map time and use this value in
the gnttab_unmap_refs_async() call. This is an enhancement of commit
3f9f1c67572f5e5e6dc84216d48d1480f3c4fcf6 ("xen/grant-table: add a
mechanism to safely unmap pages that are in use").

The safe unmapping mechanism defers pages that may still be in use
(ref count > 1). This change is needed to allow mapping and unmapping
of pages that hold more than one reference. For example,
DRM_IOCTL_MODE_CREATE_DUMB creates a dma buffer with page_count = 2,
whose unmap call would be deferred for as long as the buffer exists,
because the ref count never drops to 1. This means the buffer remains
mapped during the DRM_IOCTL_MODE_DESTROY_DUMB call, which causes an
error:

Unable to handle kernel paging request at virtual address <addr>
....
Call trace:
 check_move_unevictable_folios+0xb8/0x4d0
 check_move_unevictable_pages+0x8c/0x110
 drm_gem_put_pages+0x118/0x198
 drm_gem_shmem_put_pages_locked+0x4c/0x70
 drm_gem_shmem_unpin+0x30/0x50
 virtio_gpu_cleanup_object+0x84/0x130
 virtio_gpu_cmd_unref_cb+0x18/0x2c
 virtio_gpu_dequeue_ctrl_func+0x124/0x290
 process_one_work+0x1d0/0x320
 worker_thread+0x14c/0x444
 kthread+0x10c/0x110

This enhancement records the expected page_count during the map call,
so the refs can be unmapped properly without unneeded deferrals.

Signed-off-by: Oleksii Moisieiev <oleksii_moisieiev@epam.com>
---
 drivers/xen/grant-table.c | 18 +++++++++++++++++-
 include/xen/grant_table.h |  3 +++
 2 files changed, 20 insertions(+), 1 deletion(-)
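
For illustration only, a minimal userspace C model of the deferral
decision before and after this change (not part of the patch;
page_model, must_defer_old and must_defer_new are invented names). It
shows why a page that already holds two references at map time is
deferred forever by the old hardcoded check but unmapped by the new
saved-count check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct page_model {
	int count;      /* current reference count of the page */
	uint16_t saved; /* count recorded at map time (foreign->private) */
};

/* Pre-patch check: defer while anyone besides the mapping holds a ref. */
static bool must_defer_old(const struct page_model *p)
{
	return p->count > 1;
}

/* Post-patch check: defer only while refs beyond the map-time count exist. */
static bool must_defer_new(const struct page_model *p)
{
	return p->count > p->saved;
}

int main(void)
{
	/* DRM dumb buffer: two refs at map time, still two at unmap time. */
	struct page_model dumb = { .count = 2, .saved = 2 };

	printf("old check defers: %d, new check defers: %d\n",
	       must_defer_old(&dumb), must_defer_new(&dumb));
	return 0;
}

Compiled with gcc, this prints "old check defers: 1, new check
defers: 0", mirroring the DRM_IOCTL_MODE_CREATE_DUMB scenario above:
the old check would reschedule the delayed work indefinitely, while the
new one unmaps as soon as no users beyond the map-time count remain.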