pH/kernel/patches/0001-virtio-wl.patch

diff -uprN linux-6.0.12/drivers/virtio/Kconfig linux-6.0.12-wl/drivers/virtio/Kconfig
--- linux-6.0.12/drivers/virtio/Kconfig 2022-12-08 05:30:22.000000000 -0500
+++ linux-6.0.12-wl/drivers/virtio/Kconfig 2022-12-30 13:24:24.416282173 -0500
@@ -173,4 +173,12 @@ config VIRTIO_DMA_SHARED_BUFFER
This option adds a flavor of dma buffers that are backed by
virtio resources.
+config VIRTIO_WL
+ bool "Virtio Wayland driver"
+ depends on VIRTIO && MMU
+ help
+ This driver supports proxying of a wayland socket from host to guest.
+
+ If unsure, say 'N'.
+
endif # VIRTIO_MENU
diff -uprN linux-6.0.12/drivers/virtio/Makefile linux-6.0.12-wl/drivers/virtio/Makefile
--- linux-6.0.12/drivers/virtio/Makefile 2022-12-08 05:30:22.000000000 -0500
+++ linux-6.0.12-wl/drivers/virtio/Makefile 2022-12-30 13:25:03.886404257 -0500
@@ -12,3 +12,4 @@ obj-$(CONFIG_VIRTIO_INPUT) += virtio_inp
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o
+obj-$(CONFIG_VIRTIO_WL) += virtio_wl.o
diff -uprN linux-6.0.12/drivers/virtio/virtio_wl.c linux-6.0.12-wl/drivers/virtio/virtio_wl.c
--- linux-6.0.12/drivers/virtio/virtio_wl.c 1969-12-31 19:00:00.000000000 -0500
+++ linux-6.0.12-wl/drivers/virtio/virtio_wl.c 2022-12-30 13:15:37.000000000 -0500
@@ -0,0 +1,1598 @@
+/*
+ * Wayland Virtio Driver
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Virtio Wayland (virtio_wl or virtwl) is a virtual device that allows a guest
+ * virtual machine to use a wayland server on the host transparently (to the
+ * host). This is done by proxying the wayland protocol socket stream verbatim
+ * between the host and guest over 2 (recv and send) virtio queues. The guest
+ * can request new wayland server connections to give each guest wayland client
+ * a different server context. Each host connection's file descriptor is exposed
+ * to the guest as a virtual file descriptor (VFD). Additionally, the guest can
+ * request shared memory file descriptors which are also exposed as VFDs. These
+ * shared memory VFDs are directly writable by the guest via device memory
+ * injected by the host. Each VFD is sendable along a connection context VFD and
+ * will appear as ancillary data to the wayland server, just like a message from
+ * an ordinary wayland client. When the wayland server sends a shared memory
+ * file descriptor to the client (such as when sending a keymap), a VFD is
+ * allocated by the device automatically and its memory is injected into the
+ * guest as device memory.
+ *
+ * This driver is intended to be paired with the `virtwl_guest_proxy` program
+ * which is run in the guest system and acts like a wayland server. It accepts
+ * wayland client connections and converts their socket messages to ioctl
+ * messages exposed by this driver via the `/dev/wl` device file. While it would
+ * be possible to expose a unix stream socket from this driver, the user space
+ * helper is much cleaner to write.
+ */
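+
+/*
+ * Illustrative sketch (not part of the original driver): a hypothetical
+ * user-space caller of the ioctl interface described above, using only the
+ * definitions from include/uapi/linux/virtwl.h. Error handling is omitted
+ * and the device node is assumed to show up as /dev/wl0 (the device is
+ * created as "wl%d" in probe_common() below).
+ *
+ *	int wl_fd = open("/dev/wl0", O_RDWR | O_CLOEXEC);
+ *	struct virtwl_ioctl_new new_req = { .type = VIRTWL_IOCTL_NEW_CTX };
+ *	int ctx_fd;
+ *
+ *	if (ioctl(wl_fd, VIRTWL_IOCTL_NEW, &new_req) == 0)
+ *		ctx_fd = new_req.fd;	// context VFD, usable with the
+ *					// VIRTWL_IOCTL_SEND/RECV ioctls
+ */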
+
+#include <linux/anon_inodes.h>
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/scatterlist.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/virtio.h>
+#include <linux/virtio_dma_buf.h>
+#include <linux/virtio_wl.h>
+#include <linux/vmalloc.h>
+
+#include <uapi/linux/dma-buf.h>
+
+#ifdef CONFIG_DRM_VIRTIO_GPU
+#define SEND_VIRTGPU_RESOURCES
+#include <linux/sync_file.h>
+#endif
+
+#define VFD_ILLEGAL_SIGN_BIT 0x80000000
+#define VFD_HOST_VFD_ID_BIT 0x40000000
+
+struct virtwl_vfd_qentry {
+ struct list_head list;
+ struct virtio_wl_ctrl_hdr *hdr;
+ unsigned int len; /* total byte length of ctrl_vfd_* + vfds + data */
+ unsigned int vfd_offset; /* int offset into vfds */
+ unsigned int data_offset; /* byte offset into data */
+};
+
+struct virtwl_vfd {
+ struct kobject kobj;
+ struct mutex lock;
+
+ struct virtwl_info *vi;
+ uint32_t id;
+ uint32_t flags;
+ uint64_t pfn;
+ uint32_t size;
+ bool hungup;
+
+ struct list_head in_queue; /* list of virtwl_vfd_qentry */
+ wait_queue_head_t in_waitq;
+};
+
+struct virtwl_info {
+ dev_t dev_num;
+ struct device *dev;
+ struct class *class;
+ struct cdev cdev;
+
+ struct mutex vq_locks[VIRTWL_QUEUE_COUNT];
+ struct virtqueue *vqs[VIRTWL_QUEUE_COUNT];
+ struct work_struct in_vq_work;
+ struct work_struct out_vq_work;
+
+ wait_queue_head_t out_waitq;
+
+ struct mutex vfds_lock;
+ struct idr vfds;
+
+ bool use_send_vfd_v2;
+};
+
+static struct virtwl_vfd *virtwl_vfd_alloc(struct virtwl_info *vi);
+static void virtwl_vfd_free(struct virtwl_vfd *vfd);
+
+static const struct file_operations virtwl_vfd_fops;
+
+static int virtwl_resp_err(unsigned int type)
+{
+ switch (type) {
+ case VIRTIO_WL_RESP_OK:
+ case VIRTIO_WL_RESP_VFD_NEW:
+ case VIRTIO_WL_RESP_VFD_NEW_DMABUF:
+ return 0;
+ case VIRTIO_WL_RESP_ERR:
+ return -ENODEV; /* Device is no longer reliable */
+ case VIRTIO_WL_RESP_OUT_OF_MEMORY:
+ return -ENOMEM;
+ case VIRTIO_WL_RESP_INVALID_ID:
+ return -ENOENT;
+ case VIRTIO_WL_RESP_INVALID_TYPE:
+ return -EINVAL;
+ case VIRTIO_WL_RESP_INVALID_FLAGS:
+ return -EPERM;
+ case VIRTIO_WL_RESP_INVALID_CMD:
+ return -ENOTTY;
+ default:
+ return -EPROTO;
+ }
+}
+
+static int vq_return_inbuf_locked(struct virtqueue *vq, void *buffer)
+{
+ int ret;
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, buffer, PAGE_SIZE);
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, buffer, GFP_KERNEL);
+ if (ret) {
+ pr_warn("virtwl: failed to give inbuf to host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vq_queue_out(struct virtwl_info *vi, struct scatterlist *out_sg,
+ struct scatterlist *in_sg,
+ struct completion *finish_completion,
+ bool nonblock)
+{
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_OUT];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_OUT];
+ struct scatterlist *sgs[] = { out_sg, in_sg };
+ int ret = 0;
+
+ mutex_lock(vq_lock);
+ while ((ret = virtqueue_add_sgs(vq, sgs, 1, 1, finish_completion,
+ GFP_KERNEL)) == -ENOSPC) {
+ mutex_unlock(vq_lock);
+ if (nonblock)
+ return -EAGAIN;
+ if (!wait_event_timeout(vi->out_waitq, vq->num_free > 0, HZ))
+ return -EBUSY;
+ mutex_lock(vq_lock);
+ }
+ if (!ret)
+ virtqueue_kick(vq);
+ mutex_unlock(vq_lock);
+
+ return ret;
+}
+
+static int vq_fill_locked(struct virtqueue *vq)
+{
+ void *buffer;
+ int ret = 0;
+
+ while (vq->num_free > 0) {
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto clear_queue;
+ }
+
+ ret = vq_return_inbuf_locked(vq, buffer);
+ if (ret)
+ goto clear_queue;
+ }
+
+ return 0;
+
+clear_queue:
+ while ((buffer = virtqueue_detach_unused_buf(vq)))
+ kfree(buffer);
+ return ret;
+}
+
+static bool vq_handle_new(struct virtwl_info *vi,
+ struct virtio_wl_ctrl_vfd_new *new, unsigned int len)
+{
+ struct virtwl_vfd *vfd;
+ u32 id = new->vfd_id;
+ int ret;
+
+ if (id == 0)
+ return true; /* return the inbuf to vq */
+
+ if (!(id & VFD_HOST_VFD_ID_BIT) || (id & VFD_ILLEGAL_SIGN_BIT)) {
+ pr_warn("virtwl: received a vfd with invalid id: %u\n", id);
+ return true; /* return the inbuf to vq */
+ }
+
+ vfd = virtwl_vfd_alloc(vi);
+ if (!vfd)
+ return true; /* return the inbuf to vq */
+
+ mutex_lock(&vi->vfds_lock);
+ ret = idr_alloc(&vi->vfds, vfd, id, id + 1, GFP_KERNEL);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (ret <= 0) {
+ virtwl_vfd_free(vfd);
+ pr_warn("virtwl: failed to place received vfd: %d\n", ret);
+ return true; /* return the inbuf to vq */
+ }
+
+ vfd->id = id;
+ vfd->size = new->size;
+ vfd->pfn = new->pfn;
+ vfd->flags = new->flags;
+
+ return true; /* return the inbuf to vq */
+}
+
+static bool vq_handle_recv(struct virtwl_info *vi,
+ struct virtio_wl_ctrl_vfd_recv *recv,
+ unsigned int len)
+{
+ struct virtwl_vfd *vfd;
+ struct virtwl_vfd_qentry *qentry;
+
+ mutex_lock(&vi->vfds_lock);
+ vfd = idr_find(&vi->vfds, recv->vfd_id);
+ if (vfd)
+ mutex_lock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (!vfd) {
+ pr_warn("virtwl: recv for unknown vfd_id %u\n", recv->vfd_id);
+ return true; /* return the inbuf to vq */
+ }
+
+ qentry = kzalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry) {
+ mutex_unlock(&vfd->lock);
+ pr_warn("virtwl: failed to allocate qentry for vfd\n");
+ return true; /* return the inbuf to vq */
+ }
+
+ qentry->hdr = &recv->hdr;
+ qentry->len = len;
+
+ list_add_tail(&qentry->list, &vfd->in_queue);
+ wake_up_interruptible_all(&vfd->in_waitq);
+ mutex_unlock(&vfd->lock);
+
+ return false; /* do not return the inbuf to vq */
+}
+
+static bool vq_handle_hup(struct virtwl_info *vi,
+ struct virtio_wl_ctrl_vfd *vfd_hup,
+ unsigned int len)
+{
+ struct virtwl_vfd *vfd;
+
+ mutex_lock(&vi->vfds_lock);
+ vfd = idr_find(&vi->vfds, vfd_hup->vfd_id);
+ if (vfd)
+ mutex_lock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (!vfd) {
+ pr_warn("virtwl: hup for unknown vfd_id %u\n", vfd_hup->vfd_id);
+ return true; /* return the inbuf to vq */
+ }
+
+ if (vfd->hungup)
+ pr_warn("virtwl: hup for hungup vfd_id %u\n", vfd_hup->vfd_id);
+
+ vfd->hungup = true;
+ wake_up_interruptible_all(&vfd->in_waitq);
+ mutex_unlock(&vfd->lock);
+
+ return true;
+}
+
+static bool vq_dispatch_hdr(struct virtwl_info *vi, unsigned int len,
+ struct virtio_wl_ctrl_hdr *hdr)
+{
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+ bool return_vq = true;
+ int ret;
+
+ switch (hdr->type) {
+ case VIRTIO_WL_CMD_VFD_NEW:
+ return_vq = vq_handle_new(vi,
+ (struct virtio_wl_ctrl_vfd_new *)hdr,
+ len);
+ break;
+ case VIRTIO_WL_CMD_VFD_RECV:
+ return_vq = vq_handle_recv(vi,
+ (struct virtio_wl_ctrl_vfd_recv *)hdr, len);
+ break;
+ case VIRTIO_WL_CMD_VFD_HUP:
+ return_vq = vq_handle_hup(vi, (struct virtio_wl_ctrl_vfd *)hdr,
+ len);
+ break;
+ default:
+ pr_warn("virtwl: unhandled ctrl command: %u\n", hdr->type);
+ break;
+ }
+
+ if (!return_vq)
+ return false; /* do not kick the vq */
+
+ mutex_lock(vq_lock);
+ ret = vq_return_inbuf_locked(vq, hdr);
+ mutex_unlock(vq_lock);
+ if (ret) {
+ pr_warn("virtwl: failed to return inbuf to host: %d\n", ret);
+ kfree(hdr);
+ }
+
+ return true; /* kick the vq */
+}
+
+static void vq_in_work_handler(struct work_struct *work)
+{
+ struct virtwl_info *vi = container_of(work, struct virtwl_info,
+ in_vq_work);
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+ void *buffer;
+ unsigned int len;
+ bool kick_vq = false;
+
+ mutex_lock(vq_lock);
+ while ((buffer = virtqueue_get_buf(vq, &len)) != NULL) {
+ struct virtio_wl_ctrl_hdr *hdr = buffer;
+
+ mutex_unlock(vq_lock);
+ kick_vq |= vq_dispatch_hdr(vi, len, hdr);
+ mutex_lock(vq_lock);
+ }
+ mutex_unlock(vq_lock);
+
+ if (kick_vq)
+ virtqueue_kick(vq);
+}
+
+static void vq_out_work_handler(struct work_struct *work)
+{
+ struct virtwl_info *vi = container_of(work, struct virtwl_info,
+ out_vq_work);
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_OUT];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_OUT];
+ unsigned int len;
+ struct completion *finish_completion;
+ bool wake_waitq = false;
+
+ mutex_lock(vq_lock);
+ while ((finish_completion = virtqueue_get_buf(vq, &len)) != NULL) {
+ wake_waitq = true;
+ complete(finish_completion);
+ }
+ mutex_unlock(vq_lock);
+
+ if (wake_waitq)
+ wake_up_interruptible_all(&vi->out_waitq);
+}
+
+static void vq_in_cb(struct virtqueue *vq)
+{
+ struct virtwl_info *vi = vq->vdev->priv;
+
+ schedule_work(&vi->in_vq_work);
+}
+
+static void vq_out_cb(struct virtqueue *vq)
+{
+ struct virtwl_info *vi = vq->vdev->priv;
+
+ schedule_work(&vi->out_vq_work);
+}
+
+static struct virtwl_vfd *virtwl_vfd_alloc(struct virtwl_info *vi)
+{
+ struct virtwl_vfd *vfd = kzalloc(sizeof(struct virtwl_vfd), GFP_KERNEL);
+
+ if (!vfd)
+ return ERR_PTR(-ENOMEM);
+
+ vfd->vi = vi;
+
+ mutex_init(&vfd->lock);
+ INIT_LIST_HEAD(&vfd->in_queue);
+ init_waitqueue_head(&vfd->in_waitq);
+
+ return vfd;
+}
+
+static int virtwl_vfd_file_flags(struct virtwl_vfd *vfd)
+{
+ int flags = 0;
+ int rw_mask = VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
+
+ if ((vfd->flags & rw_mask) == rw_mask)
+ flags |= O_RDWR;
+ else if (vfd->flags & VIRTIO_WL_VFD_WRITE)
+ flags |= O_WRONLY;
+ else if (vfd->flags & VIRTIO_WL_VFD_READ)
+ flags |= O_RDONLY;
+ if (vfd->pfn)
+ flags |= O_RDWR;
+ return flags;
+}
+
+/* Locks the vfd and unlinks its id from vi */
+static void virtwl_vfd_lock_unlink(struct virtwl_vfd *vfd)
+{
+ struct virtwl_info *vi = vfd->vi;
+
+ /* this order is important to avoid deadlock */
+ mutex_lock(&vi->vfds_lock);
+ mutex_lock(&vfd->lock);
+ idr_remove(&vi->vfds, vfd->id);
+ mutex_unlock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+}
+
+/*
+ * Only used to free a vfd that is not referenced any place else and contains
+ * no queued virtio buffers. This must not be called while the vfd is still
+ * included in vi->vfds.
+ */
+static void virtwl_vfd_free(struct virtwl_vfd *vfd)
+{
+ kfree(vfd);
+}
+
+/*
+ * Thread safe and also removes vfd from vi as well as any queued virtio buffers
+ */
+static void virtwl_vfd_remove(struct virtwl_vfd *vfd)
+{
+ struct virtwl_info *vi = vfd->vi;
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+ struct virtwl_vfd_qentry *qentry, *next;
+
+ virtwl_vfd_lock_unlink(vfd);
+
+ mutex_lock(vq_lock);
+ list_for_each_entry_safe(qentry, next, &vfd->in_queue, list) {
+ vq_return_inbuf_locked(vq, qentry->hdr);
+ list_del(&qentry->list);
+ kfree(qentry);
+ }
+ mutex_unlock(vq_lock);
+ virtqueue_kick(vq);
+
+ virtwl_vfd_free(vfd);
+}
+
+static void vfd_qentry_free_if_empty(struct virtwl_vfd *vfd,
+ struct virtwl_vfd_qentry *qentry)
+{
+ struct virtwl_info *vi = vfd->vi;
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+
+ if (qentry->hdr->type == VIRTIO_WL_CMD_VFD_RECV) {
+ struct virtio_wl_ctrl_vfd_recv *recv =
+ (struct virtio_wl_ctrl_vfd_recv *)qentry->hdr;
+ ssize_t data_len =
+ (ssize_t)qentry->len - (ssize_t)sizeof(*recv) -
+ (ssize_t)recv->vfd_count * (ssize_t)sizeof(__le32);
+
+ if (qentry->vfd_offset < recv->vfd_count)
+ return;
+
+ if ((s64)qentry->data_offset < data_len)
+ return;
+ }
+
+ mutex_lock(vq_lock);
+ vq_return_inbuf_locked(vq, qentry->hdr);
+ mutex_unlock(vq_lock);
+ list_del(&qentry->list);
+ kfree(qentry);
+ virtqueue_kick(vq);
+}
+
+static ssize_t vfd_out_locked(struct virtwl_vfd *vfd, char __user *buffer,
+ size_t len)
+{
+ struct virtwl_vfd_qentry *qentry, *next;
+ ssize_t read_count = 0;
+
+ list_for_each_entry_safe(qentry, next, &vfd->in_queue, list) {
+ struct virtio_wl_ctrl_vfd_recv *recv =
+ (struct virtio_wl_ctrl_vfd_recv *)qentry->hdr;
+ size_t recv_offset = sizeof(*recv) + recv->vfd_count *
+ sizeof(__le32) + qentry->data_offset;
+ u8 *buf = (u8 *)recv + recv_offset;
+ ssize_t to_read = (ssize_t)qentry->len - (ssize_t)recv_offset;
+
+ if (qentry->hdr->type != VIRTIO_WL_CMD_VFD_RECV)
+ continue;
+
+ if ((to_read + read_count) > len)
+ to_read = len - read_count;
+
+ if (copy_to_user(buffer + read_count, buf, to_read)) {
+ read_count = -EFAULT;
+ break;
+ }
+
+ read_count += to_read;
+
+ qentry->data_offset += to_read;
+ vfd_qentry_free_if_empty(vfd, qentry);
+
+ if (read_count >= len)
+ break;
+ }
+
+ return read_count;
+}
+
+/* must hold both vfd->lock and vi->vfds_lock */
+static size_t vfd_out_vfds_locked(struct virtwl_vfd *vfd,
+ struct virtwl_vfd **vfds, size_t count)
+{
+ struct virtwl_info *vi = vfd->vi;
+ struct virtwl_vfd_qentry *qentry, *next;
+ size_t i;
+ size_t read_count = 0;
+
+ list_for_each_entry_safe(qentry, next, &vfd->in_queue, list) {
+ struct virtio_wl_ctrl_vfd_recv *recv =
+ (struct virtio_wl_ctrl_vfd_recv *)qentry->hdr;
+ size_t vfd_offset = sizeof(*recv) + qentry->vfd_offset *
+ sizeof(__le32);
+ __le32 *vfds_le = (__le32 *)((void *)recv + vfd_offset);
+ ssize_t vfds_to_read = recv->vfd_count - qentry->vfd_offset;
+
+ if (read_count >= count)
+ break;
+ if (vfds_to_read <= 0)
+ continue;
+ if (qentry->hdr->type != VIRTIO_WL_CMD_VFD_RECV)
+ continue;
+
+ if ((vfds_to_read + read_count) > count)
+ vfds_to_read = count - read_count;
+
+ for (i = 0; i < vfds_to_read; i++) {
+ uint32_t vfd_id = le32_to_cpu(vfds_le[i]);
+ vfds[read_count] = idr_find(&vi->vfds, vfd_id);
+ if (vfds[read_count]) {
+ read_count++;
+ } else {
+ pr_warn("virtwl: received a vfd with unrecognized id: %u\n",
+ vfd_id);
+ }
+ qentry->vfd_offset++;
+ }
+
+ vfd_qentry_free_if_empty(vfd, qentry);
+ }
+
+ return read_count;
+}
+
+/* this can only be called if the caller has unique ownership of the vfd */
+static int do_vfd_close(struct virtwl_vfd *vfd)
+{
+ struct virtio_wl_ctrl_vfd *ctrl_close;
+ struct virtwl_info *vi = vfd->vi;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret = 0;
+
+ ctrl_close = kzalloc(sizeof(*ctrl_close), GFP_KERNEL);
+ if (!ctrl_close)
+ return -ENOMEM;
+
+ ctrl_close->hdr.type = VIRTIO_WL_CMD_VFD_CLOSE;
+ ctrl_close->vfd_id = vfd->id;
+
+ sg_init_one(&out_sg, &ctrl_close->hdr,
+ sizeof(struct virtio_wl_ctrl_vfd));
+ sg_init_one(&in_sg, &ctrl_close->hdr,
+ sizeof(struct virtio_wl_ctrl_hdr));
+
+ init_completion(&finish_completion);
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion,
+ false /* block */);
+ if (ret) {
+ pr_warn("virtwl: failed to queue close vfd id %u: %d\n",
+ vfd->id,
+ ret);
+ goto free_ctrl_close;
+ }
+
+ wait_for_completion(&finish_completion);
+ virtwl_vfd_remove(vfd);
+
+free_ctrl_close:
+ kfree(ctrl_close);
+ return ret;
+}
+
+static ssize_t virtwl_vfd_recv(struct file *filp, char __user *buffer,
+ size_t len, struct virtwl_vfd **vfds,
+ size_t *vfd_count)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ ssize_t read_count = 0;
+ size_t vfd_read_count = 0;
+ bool force_to_wait = false;
+
+ mutex_lock(&vi->vfds_lock);
+ mutex_lock(&vfd->lock);
+
+ while (read_count == 0 && vfd_read_count == 0) {
+ while (force_to_wait || list_empty(&vfd->in_queue)) {
+ force_to_wait = false;
+ if (vfd->hungup)
+ goto out_unlock;
+
+ mutex_unlock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(vfd->in_waitq,
+ !list_empty(&vfd->in_queue) || vfd->hungup))
+ return -ERESTARTSYS;
+
+ mutex_lock(&vi->vfds_lock);
+ mutex_lock(&vfd->lock);
+ }
+
+ read_count = vfd_out_locked(vfd, buffer, len);
+ if (read_count < 0)
+ goto out_unlock;
+ if (vfds && vfd_count && *vfd_count)
+ vfd_read_count = vfd_out_vfds_locked(vfd, vfds,
+ *vfd_count);
+ else if (read_count == 0 && !list_empty(&vfd->in_queue))
+ /*
+ * Indicates a corner case where the in_queue has ONLY
+ * incoming VFDs but the caller has given us no space to
+ * store them. We force a wait for more activity on the
+ * in_queue to prevent busy waiting.
+ */
+ force_to_wait = true;
+ }
+
+out_unlock:
+ mutex_unlock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+ if (vfd_count)
+ *vfd_count = vfd_read_count;
+ return read_count;
+}
+
+static int encode_vfd_ids(struct virtwl_vfd **vfds, size_t vfd_count,
+ __le32 *vfd_ids)
2017-10-15 22:36:00 -04:00
+{
+ size_t i;
2017-10-15 22:36:00 -04:00
+
+ for (i = 0; i < vfd_count; i++) {
+ if (vfds[i])
+ vfd_ids[i] = cpu_to_le32(vfds[i]->id);
+ else
+ return -EBADFD;
+ }
+ return 0;
+}
+
+#ifdef SEND_VIRTGPU_RESOURCES
+static int get_dma_buf_id(struct dma_buf *dma_buf, u32 *id)
+{
+ uuid_t uuid;
+ int ret = 0;
+
+ ret = virtio_dma_buf_get_uuid(dma_buf, &uuid);
+ *id = be32_to_cpu(*(__be32 *)(uuid.b + 12));
+
+ return ret;
+}
+
+static int encode_fence(struct dma_fence *fence,
+ struct virtio_wl_ctrl_vfd_send_vfd_v2 *vfd_id)
+{
+ const char *name = fence->ops->get_driver_name(fence);
+
+ // We only support virtgpu based fences. Since all virtgpu fences are
+ // in the same context, merging sync_files will always reduce to a
+ // single virtgpu fence.
+ if (strcmp(name, "virtio_gpu") != 0)
+ return -EBADFD;
+
+ if (dma_fence_is_signaled(fence)) {
+ vfd_id->kind =
+ VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE;
+ } else {
+ vfd_id->kind = VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE;
+ vfd_id->seqno = cpu_to_le32(fence->seqno);
+ }
+ return 0;
+}
+
+static int encode_vfd_ids_foreign(struct virtwl_vfd **vfds,
+ struct dma_buf **virtgpu_dma_bufs,
+ struct dma_fence **virtgpu_dma_fence,
+ size_t vfd_count,
+ struct virtio_wl_ctrl_vfd_send_vfd *ids,
+ struct virtio_wl_ctrl_vfd_send_vfd_v2 *ids_v2)
+{
+ size_t i;
+ int ret;
+
+ for (i = 0; i < vfd_count; i++) {
+ uint32_t kind = UINT_MAX;
+ uint32_t id = 0;
+
+ if (vfds[i]) {
+ kind = VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL;
+ id = vfds[i]->id;
+ } else if (virtgpu_dma_bufs[i]) {
+ ret = get_dma_buf_id(virtgpu_dma_bufs[i],
+ &id);
+ if (ret)
+ return ret;
+ kind = VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU;
+ } else if (virtgpu_dma_fence[i]) {
+ ret = encode_fence(virtgpu_dma_fence[i],
+ ids_v2 + i);
+ if (ret)
+ return ret;
+ } else {
+ return -EBADFD;
+ }
+ if (kind != UINT_MAX) {
+ if (ids) {
+ ids[i].kind = kind;
+ ids[i].id = cpu_to_le32(id);
+ } else {
+ ids_v2[i].kind = kind;
+ ids_v2[i].id = cpu_to_le32(id);
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+static int virtwl_vfd_send(struct file *filp, const char __user *buffer,
+ u32 len, int *vfd_fds)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ struct fd vfd_files[VIRTWL_SEND_MAX_ALLOCS] = { { 0 } };
+ struct virtwl_vfd *vfds[VIRTWL_SEND_MAX_ALLOCS] = { 0 };
+#ifdef SEND_VIRTGPU_RESOURCES
+ struct dma_buf *virtgpu_dma_bufs[VIRTWL_SEND_MAX_ALLOCS] = { 0 };
+ struct dma_fence *virtgpu_dma_fence[VIRTWL_SEND_MAX_ALLOCS] = { 0 };
+ bool foreign_id = false;
+#endif
+ size_t vfd_count = 0;
+ size_t vfd_ids_size;
+ size_t ctrl_send_size;
2017-10-15 22:36:00 -04:00
+ struct virtio_wl_ctrl_vfd_send *ctrl_send;
+ u8 *vfd_ids;
+ u8 *out_buffer;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ struct sg_table sgt;
+ struct vm_struct *area;
+ bool vmalloced;
+ int ret;
+ int i;
+
+ if (vfd_fds) {
+ for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++) {
+ struct fd vfd_file;
+ int fd = vfd_fds[i];
+
+ if (fd < 0)
+ break;
+
+ vfd_file = fdget(vfd_fds[i]);
+ if (!vfd_file.file) {
+ ret = -EBADFD;
+ goto put_files;
+ }
+
+ if (vfd_file.file->f_op == &virtwl_vfd_fops) {
+ vfd_files[i] = vfd_file;
+
+ vfds[i] = vfd_file.file->private_data;
+ if (vfds[i] && vfds[i]->id) {
+ vfd_count++;
+ continue;
+ }
+
+ ret = -EINVAL;
+ goto put_files;
+ } else {
+ struct dma_buf *dma_buf = ERR_PTR(-EINVAL);
+ struct dma_fence *dma_fence = ERR_PTR(-EINVAL);
+ bool handled = false;
+
+#ifdef SEND_VIRTGPU_RESOURCES
+ dma_buf = dma_buf_get(vfd_fds[i]);
+ dma_fence = vi->use_send_vfd_v2
+ ? sync_file_get_fence(vfd_fds[i])
+ : ERR_PTR(-EINVAL);
+ handled = !IS_ERR(dma_buf) ||
+ !IS_ERR(dma_fence);
+
+ if (!IS_ERR(dma_buf)) {
+ virtgpu_dma_bufs[i] = dma_buf;
+ } else {
+ virtgpu_dma_fence[i] = dma_fence;
+ }
+
+ foreign_id = true;
+ vfd_count++;
+#endif
+ fdput(vfd_file);
+ if (!handled) {
+ ret = IS_ERR(dma_buf) ?
+ PTR_ERR(dma_buf) :
+ PTR_ERR(dma_fence);
+ goto put_files;
+ }
+ }
+ }
+ }
+
+ /* Empty writes always succeed. */
+ if (len == 0 && vfd_count == 0)
+ return 0;
+
+ vfd_ids_size = vfd_count * sizeof(__le32);
+#ifdef SEND_VIRTGPU_RESOURCES
+ if (foreign_id) {
+ vfd_ids_size = vfd_count * (vi->use_send_vfd_v2
+ ? sizeof(struct virtio_wl_ctrl_vfd_send_vfd_v2)
+ : sizeof(struct virtio_wl_ctrl_vfd_send_vfd));
+ }
+#endif
+ ctrl_send_size = sizeof(*ctrl_send) + vfd_ids_size + len;
+ vmalloced = false;
+ if (ctrl_send_size < PAGE_SIZE)
+ ctrl_send = kzalloc(ctrl_send_size, GFP_KERNEL);
+ else {
+ vmalloced = true;
+ ctrl_send = vzalloc(ctrl_send_size);
+ }
+ if (!ctrl_send) {
+ ret = -ENOMEM;
+ goto put_files;
+ }
+
+ vfd_ids = (u8 *)ctrl_send + sizeof(*ctrl_send);
+ out_buffer = (u8 *)ctrl_send + ctrl_send_size - len;
+
+ ctrl_send->hdr.type = VIRTIO_WL_CMD_VFD_SEND;
+#ifdef SEND_VIRTGPU_RESOURCES
+ if (foreign_id) {
+ struct virtio_wl_ctrl_vfd_send_vfd *v1 = NULL;
+ struct virtio_wl_ctrl_vfd_send_vfd_v2 *v2 = NULL;
+
+ if (vi->use_send_vfd_v2)
+ v2 = (struct virtio_wl_ctrl_vfd_send_vfd_v2 *) vfd_ids;
+ else
+ v1 = (struct virtio_wl_ctrl_vfd_send_vfd *) vfd_ids;
+
+ ctrl_send->hdr.type = VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID;
+ ret = encode_vfd_ids_foreign(vfds,
+ virtgpu_dma_bufs, virtgpu_dma_fence, vfd_count,
+ v1, v2);
+ } else {
+ ret = encode_vfd_ids(vfds, vfd_count, (__le32 *)vfd_ids);
+ }
+#else
+ ret = encode_vfd_ids(vfds, vfd_count, (__le32 *)vfd_ids);
+#endif
+ if (ret)
+ goto free_ctrl_send;
+ ctrl_send->vfd_id = vfd->id;
+ ctrl_send->vfd_count = vfd_count;
+
+ if (copy_from_user(out_buffer, buffer, len)) {
+ ret = -EFAULT;
+ goto free_ctrl_send;
+ }
+
+ init_completion(&finish_completion);
+ if (!vmalloced) {
+ sg_init_one(&out_sg, ctrl_send, ctrl_send_size);
+ sg_init_one(&in_sg, ctrl_send,
+ sizeof(struct virtio_wl_ctrl_hdr));
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion,
+ filp->f_flags & O_NONBLOCK);
+ } else {
+ area = find_vm_area(ctrl_send);
+ ret = sg_alloc_table_from_pages(&sgt, area->pages,
+ area->nr_pages, 0, ctrl_send_size, GFP_KERNEL);
+ if (ret)
+ goto free_ctrl_send;
+
+ sg_init_table(&in_sg, 1);
+ sg_set_page(&in_sg, area->pages[0],
+ sizeof(struct virtio_wl_ctrl_hdr), 0);
+
+ ret = vq_queue_out(vi, sgt.sgl, &in_sg, &finish_completion,
+ filp->f_flags & O_NONBLOCK);
+ }
+ if (ret)
+ goto free_sgt;
+
+ wait_for_completion(&finish_completion);
+
+ ret = virtwl_resp_err(ctrl_send->hdr.type);
+
+free_sgt:
+ if (vmalloced)
+ sg_free_table(&sgt);
+free_ctrl_send:
+ kvfree(ctrl_send);
+put_files:
+ for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++) {
+ if (vfd_files[i].file)
+ fdput(vfd_files[i]);
+#ifdef SEND_VIRTGPU_RESOURCES
+ if (virtgpu_dma_bufs[i])
+ dma_buf_put(virtgpu_dma_bufs[i]);
+ if (virtgpu_dma_fence[i])
+ dma_fence_put(virtgpu_dma_fence[i]);
+#endif
+ }
+ return ret;
+}
+
+static int virtwl_vfd_dmabuf_sync(struct file *filp, u32 flags)
+{
+ struct virtio_wl_ctrl_vfd_dmabuf_sync *ctrl_dmabuf_sync;
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret = 0;
+
+ ctrl_dmabuf_sync = kzalloc(sizeof(*ctrl_dmabuf_sync), GFP_KERNEL);
+ if (!ctrl_dmabuf_sync)
+ return -ENOMEM;
+
+ ctrl_dmabuf_sync->hdr.type = VIRTIO_WL_CMD_VFD_DMABUF_SYNC;
+ ctrl_dmabuf_sync->vfd_id = vfd->id;
+ ctrl_dmabuf_sync->flags = flags;
+
+ sg_init_one(&out_sg, &ctrl_dmabuf_sync->hdr,
+ sizeof(struct virtio_wl_ctrl_vfd_dmabuf_sync));
+ sg_init_one(&in_sg, &ctrl_dmabuf_sync->hdr,
+ sizeof(struct virtio_wl_ctrl_hdr));
+
+ init_completion(&finish_completion);
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion,
+ false /* block */);
+ if (ret) {
+ pr_warn("virtwl: failed to queue dmabuf sync vfd id %u: %d\n",
+ vfd->id,
+ ret);
+ goto free_ctrl_dmabuf_sync;
+ }
+
+ wait_for_completion(&finish_completion);
+
+free_ctrl_dmabuf_sync:
+ kfree(ctrl_dmabuf_sync);
+ return ret;
+}
+
+static ssize_t virtwl_vfd_read(struct file *filp, char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ return virtwl_vfd_recv(filp, buffer, size, NULL, NULL);
+}
+
+static ssize_t virtwl_vfd_write(struct file *filp, const char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ int ret = 0;
+
+ if (size > U32_MAX)
+ size = U32_MAX;
+
+ ret = virtwl_vfd_send(filp, buffer, size, NULL);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static int virtwl_vfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+ int ret = 0;
+
+ mutex_lock(&vfd->lock);
+
+ if (!vfd->pfn) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ if (vm_size + (vma->vm_pgoff << PAGE_SHIFT) > PAGE_ALIGN(vfd->size)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, vfd->pfn, vm_size,
+ vma->vm_page_prot);
+ if (ret)
+ goto out_unlock;
+
+ vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+out_unlock:
+ mutex_unlock(&vfd->lock);
+ return ret;
+}
+
+static unsigned int virtwl_vfd_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ unsigned int mask = 0;
+
+ mutex_lock(&vi->vq_locks[VIRTWL_VQ_OUT]);
+ poll_wait(filp, &vi->out_waitq, wait);
+ if (vi->vqs[VIRTWL_VQ_OUT]->num_free)
+ mask |= POLLOUT | POLLWRNORM;
+ mutex_unlock(&vi->vq_locks[VIRTWL_VQ_OUT]);
+
+ mutex_lock(&vfd->lock);
+ poll_wait(filp, &vfd->in_waitq, wait);
+ if (!list_empty(&vfd->in_queue))
+ mask |= POLLIN | POLLRDNORM;
+ if (vfd->hungup)
+ mask |= POLLHUP;
+ mutex_unlock(&vfd->lock);
+
+ return mask;
+}
+
+static int virtwl_vfd_release(struct inode *inodep, struct file *filp)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ uint32_t vfd_id = vfd->id;
+ int ret;
+
+ /*
+ * If release is called, filp must be out of references and we have the
+ * last reference.
+ */
+ ret = do_vfd_close(vfd);
+ if (ret)
+ pr_warn("virtwl: failed to release vfd id %u: %d\n", vfd_id,
+ ret);
+ return 0;
+}
+
+static int virtwl_open(struct inode *inodep, struct file *filp)
+{
+ struct virtwl_info *vi = container_of(inodep->i_cdev,
+ struct virtwl_info, cdev);
+
+ filp->private_data = vi;
+
+ return 0;
+}
+
+static struct virtwl_vfd *do_new(struct virtwl_info *vi,
+ struct virtwl_ioctl_new *ioctl_new,
+ size_t ioctl_new_size, bool nonblock)
+{
+ struct virtio_wl_ctrl_vfd_new *ctrl_new;
+ struct virtwl_vfd *vfd;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret = 0;
+
+ if (ioctl_new->type != VIRTWL_IOCTL_NEW_CTX &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_CTX_NAMED &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_ALLOC &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_PIPE_READ &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_PIPE_WRITE &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_DMABUF)
+ return ERR_PTR(-EINVAL);
+
+ ctrl_new = kzalloc(sizeof(*ctrl_new), GFP_KERNEL);
+ if (!ctrl_new)
+ return ERR_PTR(-ENOMEM);
+
+ vfd = virtwl_vfd_alloc(vi);
+ if (!vfd) {
+ ret = -ENOMEM;
+ goto free_ctrl_new;
+ }
+
+ mutex_lock(&vi->vfds_lock);
+ /*
+ * Take the lock before adding it to the vfds list where others might
+ * reference it.
+ */
+ mutex_lock(&vfd->lock);
+ ret = idr_alloc(&vi->vfds, vfd, 1, VIRTWL_MAX_ALLOC, GFP_KERNEL);
+ mutex_unlock(&vi->vfds_lock);
+ if (ret <= 0)
+ goto remove_vfd;
+
+ vfd->id = ret;
+ ret = 0;
+
+ ctrl_new->vfd_id = vfd->id;
+ switch (ioctl_new->type) {
+ case VIRTWL_IOCTL_NEW_CTX:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_CTX;
+ ctrl_new->flags = VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
+ break;
+ case VIRTWL_IOCTL_NEW_CTX_NAMED:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED;
+ ctrl_new->flags = VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
+ memcpy(ctrl_new->name, ioctl_new->name, sizeof(ctrl_new->name));
+ break;
+ case VIRTWL_IOCTL_NEW_ALLOC:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW;
+ ctrl_new->size = PAGE_ALIGN(ioctl_new->size);
+ break;
+ case VIRTWL_IOCTL_NEW_PIPE_READ:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_PIPE;
+ ctrl_new->flags = VIRTIO_WL_VFD_READ;
+ break;
+ case VIRTWL_IOCTL_NEW_PIPE_WRITE:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_PIPE;
+ ctrl_new->flags = VIRTIO_WL_VFD_WRITE;
+ break;
+ case VIRTWL_IOCTL_NEW_DMABUF:
+ /* Make sure ioctl_new contains enough data for NEW_DMABUF. */
+ if (ioctl_new_size == sizeof(*ioctl_new)) {
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_DMABUF;
+ /* FIXME: convert from host byte order. */
+ memcpy(&ctrl_new->dmabuf, &ioctl_new->dmabuf,
+ sizeof(ioctl_new->dmabuf));
+ break;
+ }
+ fallthrough;
+ default:
+ ret = -EINVAL;
+ goto remove_vfd;
+ }
+
+ init_completion(&finish_completion);
+ sg_init_one(&out_sg, ctrl_new, sizeof(*ctrl_new));
+ sg_init_one(&in_sg, ctrl_new, sizeof(*ctrl_new));
+
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion, nonblock);
+ if (ret)
+ goto remove_vfd;
+
+ wait_for_completion(&finish_completion);
+
+ ret = virtwl_resp_err(ctrl_new->hdr.type);
+ if (ret)
+ goto remove_vfd;
+
+ vfd->size = ctrl_new->size;
+ vfd->pfn = ctrl_new->pfn;
+ vfd->flags = ctrl_new->flags;
+
+ mutex_unlock(&vfd->lock);
+
+ if (ioctl_new->type == VIRTWL_IOCTL_NEW_DMABUF) {
+ /* FIXME: convert to host byte order. */
+ memcpy(&ioctl_new->dmabuf, &ctrl_new->dmabuf,
+ sizeof(ctrl_new->dmabuf));
+ }
+
+ kfree(ctrl_new);
+ return vfd;
+
+remove_vfd:
+ /*
+ * unlock the vfd to avoid deadlocking while unlinking it
+ * and to avoid freeing a lock that is still held
+ */
+ mutex_unlock(&vfd->lock);
+ /* this is safe since the id cannot change after the vfd is created */
+ if (vfd->id)
+ virtwl_vfd_lock_unlink(vfd);
+ virtwl_vfd_free(vfd);
+free_ctrl_new:
+ kfree(ctrl_new);
+ return ERR_PTR(ret);
+}
+
+static long virtwl_ioctl_send(struct file *filp, void __user *ptr)
+{
+ struct virtwl_ioctl_txn ioctl_send;
+ void __user *user_data = ptr + sizeof(struct virtwl_ioctl_txn);
+ int ret;
+
+ ret = copy_from_user(&ioctl_send, ptr, sizeof(struct virtwl_ioctl_txn));
+ if (ret)
+ return -EFAULT;
+
+ /* Early check for user error; do_send still uses copy_from_user. */
+ ret = !access_ok(user_data, ioctl_send.len);
+ if (ret)
+ return -EFAULT;
+
+ return virtwl_vfd_send(filp, user_data, ioctl_send.len, ioctl_send.fds);
+}
+
+static long virtwl_ioctl_recv(struct file *filp, void __user *ptr)
+{
+ struct virtwl_ioctl_txn ioctl_recv;
+ void __user *user_data = ptr + sizeof(struct virtwl_ioctl_txn);
+ int __user *user_fds = (int __user *)ptr;
+ size_t vfd_count = VIRTWL_SEND_MAX_ALLOCS;
+ struct virtwl_vfd *vfds[VIRTWL_SEND_MAX_ALLOCS] = { 0 };
+ int fds[VIRTWL_SEND_MAX_ALLOCS];
+ size_t i;
+ int ret = 0;
+
+ for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++)
+ fds[i] = -1;
+
+ ret = copy_from_user(&ioctl_recv, ptr, sizeof(struct virtwl_ioctl_txn));
+ if (ret)
+ return -EFAULT;
+
+ /* Early check for user error. */
+ ret = !access_ok(user_data, ioctl_recv.len);
+ if (ret)
+ return -EFAULT;
+
+ ret = virtwl_vfd_recv(filp, user_data, ioctl_recv.len, vfds,
+ &vfd_count);
+ if (ret < 0)
+ return ret;
+
+ ret = copy_to_user(&((struct virtwl_ioctl_txn __user *)ptr)->len, &ret,
+ sizeof(ioctl_recv.len));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_vfds;
+ }
+
+ for (i = 0; i < vfd_count; i++) {
+ ret = anon_inode_getfd("[virtwl_vfd]", &virtwl_vfd_fops,
+ vfds[i], virtwl_vfd_file_flags(vfds[i])
+ | O_CLOEXEC);
+ if (ret < 0)
+ goto free_vfds;
+
+ vfds[i] = NULL;
+ fds[i] = ret;
+ }
+
+ ret = copy_to_user(user_fds, fds, sizeof(int) * VIRTWL_SEND_MAX_ALLOCS);
+ if (ret) {
+ ret = -EFAULT;
+ goto free_vfds;
+ }
+
+ return 0;
+
+free_vfds:
+ for (i = 0; i < vfd_count; i++) {
+ if (vfds[i])
+ do_vfd_close(vfds[i]);
+ if (fds[i] >= 0)
+ close_fd(fds[i]);
+ }
+ return ret;
+}
+
+static long virtwl_ioctl_dmabuf_sync(struct file *filp, void __user *ptr)
+{
+ struct virtwl_ioctl_dmabuf_sync ioctl_dmabuf_sync;
+ int ret;
+
+ ret = copy_from_user(&ioctl_dmabuf_sync, ptr,
+ sizeof(struct virtwl_ioctl_dmabuf_sync));
+ if (ret)
+ return -EFAULT;
+
+ if (ioctl_dmabuf_sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+ return -EINVAL;
+
+ return virtwl_vfd_dmabuf_sync(filp, ioctl_dmabuf_sync.flags);
+}
+
+static long virtwl_vfd_ioctl(struct file *filp, unsigned int cmd,
+ void __user *ptr)
+{
+ switch (cmd) {
+ case VIRTWL_IOCTL_SEND:
+ return virtwl_ioctl_send(filp, ptr);
+ case VIRTWL_IOCTL_RECV:
+ return virtwl_ioctl_recv(filp, ptr);
+ case VIRTWL_IOCTL_DMABUF_SYNC:
+ return virtwl_ioctl_dmabuf_sync(filp, ptr);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long virtwl_ioctl_new(struct file *filp, void __user *ptr,
+ size_t in_size)
+{
+ struct virtwl_info *vi = filp->private_data;
+ struct virtwl_vfd *vfd;
+ struct virtwl_ioctl_new ioctl_new = {};
+ size_t size = min(in_size, sizeof(ioctl_new));
+ int ret;
+
2019-09-20 17:55:12 -04:00
+ /* Early check for user error. */
2023-01-26 15:25:28 -05:00
+ ret = !access_ok(ptr, size);
2017-10-15 22:36:00 -04:00
+ if (ret)
+ return -EFAULT;
+
+ ret = copy_from_user(&ioctl_new, ptr, size);
+ if (ret)
+ return -EFAULT;
+
+ vfd = do_new(vi, &ioctl_new, size, filp->f_flags & O_NONBLOCK);
+ if (IS_ERR(vfd))
+ return PTR_ERR(vfd);
+
+ ret = anon_inode_getfd("[virtwl_vfd]", &virtwl_vfd_fops, vfd,
+ virtwl_vfd_file_flags(vfd) | O_CLOEXEC);
+ if (ret < 0) {
+ do_vfd_close(vfd);
+ return ret;
+ }
+
+ ioctl_new.fd = ret;
+ ret = copy_to_user(ptr, &ioctl_new, size);
+ if (ret) {
+ /* The release operation will handle freeing this alloc */
+ close_fd(ioctl_new.fd);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long virtwl_ioctl_ptr(struct file *filp, unsigned int cmd,
+ void __user *ptr)
+{
+ if (filp->f_op == &virtwl_vfd_fops)
+ return virtwl_vfd_ioctl(filp, cmd, ptr);
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(VIRTWL_IOCTL_NEW):
+ return virtwl_ioctl_new(filp, ptr, _IOC_SIZE(cmd));
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long virtwl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return virtwl_ioctl_ptr(filp, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long virtwl_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return virtwl_ioctl_ptr(filp, cmd, compat_ptr(arg));
+}
+#else
+#define virtwl_ioctl_compat NULL
+#endif
+
+static int virtwl_release(struct inode *inodep, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations virtwl_fops = {
+ .open = virtwl_open,
+ .unlocked_ioctl = virtwl_ioctl,
+ .compat_ioctl = virtwl_ioctl_compat,
+ .release = virtwl_release,
+};
+
+static const struct file_operations virtwl_vfd_fops = {
+ .read = virtwl_vfd_read,
+ .write = virtwl_vfd_write,
+ .mmap = virtwl_vfd_mmap,
+ .poll = virtwl_vfd_poll,
+ .unlocked_ioctl = virtwl_ioctl,
+ .compat_ioctl = virtwl_ioctl_compat,
+ .release = virtwl_vfd_release,
+};
+
+static int probe_common(struct virtio_device *vdev)
+{
+ int i;
+ int ret;
+ struct virtwl_info *vi = NULL;
+ vq_callback_t *vq_callbacks[] = { vq_in_cb, vq_out_cb };
+ static const char * const vq_names[] = { "in", "out" };
+
+ vi = kzalloc(sizeof(struct virtwl_info), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vdev->priv = vi;
+
+ ret = alloc_chrdev_region(&vi->dev_num, 0, 1, "wl");
+ if (ret) {
+ ret = -ENOMEM;
+ pr_warn("virtwl: failed to allocate wl chrdev region: %d\n",
+ ret);
+ goto free_vi;
+ }
+
+ vi->class = class_create(THIS_MODULE, "wl");
+ if (IS_ERR(vi->class)) {
+ ret = PTR_ERR(vi->class);
+ pr_warn("virtwl: failed to create wl class: %d\n", ret);
+ goto unregister_region;
+
+ }
+
+ vi->dev = device_create(vi->class, NULL, vi->dev_num, vi, "wl%d", 0);
+ if (IS_ERR(vi->dev)) {
+ ret = PTR_ERR(vi->dev);
+ pr_warn("virtwl: failed to create wl0 device: %d\n", ret);
+ goto destroy_class;
+ }
+
+ cdev_init(&vi->cdev, &virtwl_fops);
+ ret = cdev_add(&vi->cdev, vi->dev_num, 1);
+ if (ret) {
+ pr_warn("virtwl: failed to add virtio wayland character device to system: %d\n",
+ ret);
+ goto destroy_device;
+ }
+
+ for (i = 0; i < VIRTWL_QUEUE_COUNT; i++)
+ mutex_init(&vi->vq_locks[i]);
+
+ ret = virtio_find_vqs(vdev, VIRTWL_QUEUE_COUNT, vi->vqs, vq_callbacks,
+ vq_names, NULL);
+ if (ret) {
+ pr_warn("virtwl: failed to find virtio wayland queues: %d\n",
+ ret);
+ goto del_cdev;
+ }
+
+ INIT_WORK(&vi->in_vq_work, vq_in_work_handler);
+ INIT_WORK(&vi->out_vq_work, vq_out_work_handler);
+ init_waitqueue_head(&vi->out_waitq);
+
+ mutex_init(&vi->vfds_lock);
+ idr_init(&vi->vfds);
+
+ vi->use_send_vfd_v2 = virtio_has_feature(vdev, VIRTIO_WL_F_SEND_FENCES);
+
+ /* lock is unneeded as we have unique ownership */
+ ret = vq_fill_locked(vi->vqs[VIRTWL_VQ_IN]);
+ if (ret) {
+ pr_warn("virtwl: failed to fill in virtqueue: %d", ret);
+ goto del_cdev;
+ }
+
+ virtio_device_ready(vdev);
+ virtqueue_kick(vi->vqs[VIRTWL_VQ_IN]);
+
+
+ return 0;
+
+del_cdev:
+ cdev_del(&vi->cdev);
+destroy_device:
+ put_device(vi->dev);
+destroy_class:
+ class_destroy(vi->class);
+unregister_region:
+ unregister_chrdev_region(vi->dev_num, 0);
+free_vi:
+ kfree(vi);
+ return ret;
+}
+
+static void remove_common(struct virtio_device *vdev)
+{
+ struct virtwl_info *vi = vdev->priv;
+
+ cdev_del(&vi->cdev);
+ put_device(vi->dev);
+ class_destroy(vi->class);
+ unregister_chrdev_region(vi->dev_num, 0);
+ kfree(vi);
+}
+
+static int virtwl_probe(struct virtio_device *vdev)
+{
+ return probe_common(vdev);
+}
+
+static void virtwl_remove(struct virtio_device *vdev)
+{
+ remove_common(vdev);
+}
+
+static void virtwl_scan(struct virtio_device *vdev)
+{
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_WL, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features_legacy[] = {
+ VIRTIO_WL_F_TRANS_FLAGS
+};
+
+static unsigned int features[] = {
+ VIRTIO_WL_F_TRANS_FLAGS,
+ VIRTIO_WL_F_SEND_FENCES,
+};
+
+static struct virtio_driver virtio_wl_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .feature_table_legacy = features_legacy,
+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .probe = virtwl_probe,
+ .remove = virtwl_remove,
+ .scan = virtwl_scan,
+};
+
+module_virtio_driver(virtio_wl_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio wayland driver");
+MODULE_LICENSE("GPL");
diff -uprN linux-6.0.12/include/uapi/linux/virtio_ids.h linux-6.0.12-wl/include/uapi/linux/virtio_ids.h
--- linux-6.0.12/include/uapi/linux/virtio_ids.h 2022-12-08 05:30:22.000000000 -0500
+++ linux-6.0.12-wl/include/uapi/linux/virtio_ids.h 2022-12-30 13:31:58.237086721 -0500
@@ -81,4 +81,7 @@
#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
+#define VIRTIO_ID_WL 63 /* virtio wayland */
+
+
#endif /* _LINUX_VIRTIO_IDS_H */
diff -uprN linux-6.0.12/include/uapi/linux/virtio_wl.h linux-6.0.12-wl/include/uapi/linux/virtio_wl.h
--- linux-6.0.12/include/uapi/linux/virtio_wl.h 1969-12-31 19:00:00.000000000 -0500
+++ linux-6.0.12-wl/include/uapi/linux/virtio_wl.h 2022-12-30 13:15:42.000000000 -0500
@@ -0,0 +1,154 @@
+#ifndef _LINUX_VIRTIO_WL_H
+#define _LINUX_VIRTIO_WL_H
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ */
2017-10-15 22:36:00 -04:00
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtwl.h>
+
+#define VIRTWL_IN_BUFFER_SIZE 4096
+#define VIRTWL_OUT_BUFFER_SIZE 4096
+#define VIRTWL_VQ_IN 0
+#define VIRTWL_VQ_OUT 1
+#define VIRTWL_QUEUE_COUNT 2
+#define VIRTWL_MAX_ALLOC 0x800
+#define VIRTWL_PFN_SHIFT 12
+
+/* Enables the transition to new flag semantics */
+#define VIRTIO_WL_F_TRANS_FLAGS 1
+/* Enables send fence support with virtio_wl_ctrl_vfd_send_vfd_v2 */
+#define VIRTIO_WL_F_SEND_FENCES 2
+
+struct virtio_wl_config {
+};
+
+/*
+ * The structure of each of these is virtio_wl_ctrl_hdr or one of its subclasses
+ * where noted.
+ */
+enum virtio_wl_ctrl_type {
+ VIRTIO_WL_CMD_VFD_NEW = 0x100, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_CLOSE, /* virtio_wl_ctrl_vfd */
+ VIRTIO_WL_CMD_VFD_SEND, /* virtio_wl_ctrl_vfd_send + data */
+ VIRTIO_WL_CMD_VFD_RECV, /* virtio_wl_ctrl_vfd_recv + data */
+ VIRTIO_WL_CMD_VFD_NEW_CTX, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_NEW_PIPE, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_HUP, /* virtio_wl_ctrl_vfd */
+ VIRTIO_WL_CMD_VFD_NEW_DMABUF, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_DMABUF_SYNC, /* virtio_wl_ctrl_vfd_dmabuf_sync */
+ VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID, /* virtio_wl_ctrl_vfd_send + data */
+ VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED, /* virtio_wl_ctrl_vfd_new */
+
+ VIRTIO_WL_RESP_OK = 0x1000,
+ VIRTIO_WL_RESP_VFD_NEW = 0x1001, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_RESP_VFD_NEW_DMABUF = 0x1002, /* virtio_wl_ctrl_vfd_new */
+
+ VIRTIO_WL_RESP_ERR = 0x1100,
+ VIRTIO_WL_RESP_OUT_OF_MEMORY,
+ VIRTIO_WL_RESP_INVALID_ID,
+ VIRTIO_WL_RESP_INVALID_TYPE,
+ VIRTIO_WL_RESP_INVALID_FLAGS,
+ VIRTIO_WL_RESP_INVALID_CMD,
+};
+
+struct virtio_wl_ctrl_hdr {
+ __le32 type; /* one of virtio_wl_ctrl_type */
+ __le32 flags; /* always 0 */
+};
+
+enum virtio_wl_vfd_flags {
+ VIRTIO_WL_VFD_WRITE = 0x1, /* intended to be written by guest */
+ VIRTIO_WL_VFD_READ = 0x2, /* intended to be read by guest */
+};
+
+struct virtio_wl_ctrl_vfd {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+};
+
+/*
+ * If this command is sent to the guest, it indicates that the VFD has been
+ * created and the fields indicate the properties of the VFD being offered.
+ *
+ * If this command is sent to the host, it represents a request to create a VFD
+ * of the given properties. The pfn field is ignored by the host.
+ */
+struct virtio_wl_ctrl_vfd_new {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id; /* MSB indicates device allocated vfd */
+ __le32 flags; /* virtio_wl_vfd_flags */
+ __le64 pfn; /* first guest physical page frame number if VFD_MAP */
+ __le32 size; /* size in bytes if VIRTIO_WL_CMD_VFD_NEW* */
+ union {
+ /* buffer description if VIRTIO_WL_CMD_VFD_NEW_DMABUF */
+ struct {
+ __le32 width; /* width in pixels */
+ __le32 height; /* height in pixels */
+ __le32 format; /* fourcc format */
+ __le32 stride0; /* return stride0 */
+ __le32 stride1; /* return stride1 */
+ __le32 stride2; /* return stride2 */
+ __le32 offset0; /* return offset0 */
+ __le32 offset1; /* return offset1 */
+ __le32 offset2; /* return offset2 */
+ } dmabuf;
+ /* name of socket if VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED */
+ char name[32];
+ };
+};
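+
+/*
+ * Informative example (illustration only, not a new definition): a guest
+ * request for a 4 KiB shared memory allocation, as built by do_new() in the
+ * driver, looks roughly like
+ *
+ *	struct virtio_wl_ctrl_vfd_new req = {
+ *		.hdr.type = VIRTIO_WL_CMD_VFD_NEW,
+ *		.vfd_id = <id chosen by the driver, MSB clear>,
+ *		.size = 4096,
+ *	};
+ *
+ * to which the device replies in place with VIRTIO_WL_RESP_VFD_NEW, filling
+ * in pfn, size and flags.
+ */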
+
+
+enum virtio_wl_ctrl_vfd_send_kind {
+ /* The id after this one indicates an ordinary vfd_id. */
+ VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL,
+ /* The id after this one is a virtio-gpu resource id. */
+ VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU,
+ VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE,
+ VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE,
+};
+
+struct virtio_wl_ctrl_vfd_send_vfd {
+ __le32 kind; /* virtio_wl_ctrl_vfd_send_kind */
+ __le32 id;
+};
+
+struct virtio_wl_ctrl_vfd_send_vfd_v2 {
+ __le32 kind; /* virtio_wl_ctrl_vfd_send_kind */
+ union {
+ /* For KIND_LOCAL and KIND_VIRTGPU */
+ __le32 id;
+ /* For KIND_VIRTGPU_FENCE */
+ __le64 seqno;
+ };
+};
+
+struct virtio_wl_ctrl_vfd_send {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+ __le32 vfd_count; /* struct is followed by this many IDs */
+
+ /*
+ * If hdr.type == VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID, there is a
+ * vfd_count array of virtio_wl_ctrl_vfd_send_vfd. Otherwise, there is a
+ * vfd_count array of vfd_ids.
+ */
+
+ /* the remainder is raw data */
+};
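+
+/*
+ * Informative layout example: a VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID message
+ * carrying one virtgpu resource id and N bytes of data is laid out as
+ *
+ *	struct virtio_wl_ctrl_vfd_send		(vfd_count = 1)
+ *	struct virtio_wl_ctrl_vfd_send_vfd	(or _v2 with VIRTIO_WL_F_SEND_FENCES)
+ *	u8 data[N]
+ *
+ * while a plain VIRTIO_WL_CMD_VFD_SEND carries bare __le32 vfd ids in place
+ * of the send_vfd entries.
+ */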
+
+struct virtio_wl_ctrl_vfd_recv {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+ __le32 vfd_count; /* struct is followed by this many IDs */
+ /* the remainder is raw data */
+};
+
+struct virtio_wl_ctrl_vfd_dmabuf_sync {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+ __le32 flags;
+};
+
+#endif /* _LINUX_VIRTIO_WL_H */
diff -uprN linux-6.0.12/include/uapi/linux/virtwl.h linux-6.0.12-wl/include/uapi/linux/virtwl.h
--- linux-6.0.12/include/uapi/linux/virtwl.h 1969-12-31 19:00:00.000000000 -0500
+++ linux-6.0.12-wl/include/uapi/linux/virtwl.h 2022-12-30 13:15:42.000000000 -0500
@@ -0,0 +1,67 @@
+#ifndef _LINUX_VIRTWL_H
+#define _LINUX_VIRTWL_H
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+#define VIRTWL_SEND_MAX_ALLOCS 28
+
+#define VIRTWL_IOCTL_BASE 'w'
+#define VIRTWL_IO(nr) _IO(VIRTWL_IOCTL_BASE, nr)
+#define VIRTWL_IOR(nr, type) _IOR(VIRTWL_IOCTL_BASE, nr, type)
+#define VIRTWL_IOW(nr, type) _IOW(VIRTWL_IOCTL_BASE, nr, type)
+#define VIRTWL_IOWR(nr, type) _IOWR(VIRTWL_IOCTL_BASE, nr, type)
+
+enum virtwl_ioctl_new_type {
+ VIRTWL_IOCTL_NEW_CTX, /* open a new wayland connection context */
+ VIRTWL_IOCTL_NEW_ALLOC, /* create a new virtwl shm allocation */
+ /* create a new virtwl pipe that is readable via the returned fd */
+ VIRTWL_IOCTL_NEW_PIPE_READ,
+ /* create a new virtwl pipe that is writable via the returned fd */
+ VIRTWL_IOCTL_NEW_PIPE_WRITE,
+ /* create a new virtwl dmabuf that is writable via the returned fd */
+ VIRTWL_IOCTL_NEW_DMABUF,
+ VIRTWL_IOCTL_NEW_CTX_NAMED, /* open a new named connection context */
+};
+
+struct virtwl_ioctl_new {
+ __u32 type; /* VIRTWL_IOCTL_NEW_* */
+ int fd; /* return fd */
+ __u32 flags; /* currently always 0 */
+ union {
+ /* size of allocation if type == VIRTWL_IOCTL_NEW_ALLOC */
+ __u32 size;
+ /* buffer description if type == VIRTWL_IOCTL_NEW_DMABUF */
+ struct {
+ __u32 width; /* width in pixels */
+ __u32 height; /* height in pixels */
+ __u32 format; /* fourcc format */
+ __u32 stride0; /* return stride0 */
+ __u32 stride1; /* return stride1 */
+ __u32 stride2; /* return stride2 */
+ __u32 offset0; /* return offset0 */
+ __u32 offset1; /* return offset1 */
+ __u32 offset2; /* return offset2 */
+ } dmabuf;
+ /* name of socket if type == VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED */
+ char name[32];
+ };
+};
+
+struct virtwl_ioctl_txn {
+ int fds[VIRTWL_SEND_MAX_ALLOCS];
+ __u32 len;
+ __u8 data[0];
+};
+
+struct virtwl_ioctl_dmabuf_sync {
+ __u32 flags; /* synchronization flags (see dma-buf.h) */
+};
+
+#define VIRTWL_IOCTL_NEW VIRTWL_IOWR(0x00, struct virtwl_ioctl_new)
+#define VIRTWL_IOCTL_SEND VIRTWL_IOR(0x01, struct virtwl_ioctl_txn)
+#define VIRTWL_IOCTL_RECV VIRTWL_IOW(0x02, struct virtwl_ioctl_txn)
+#define VIRTWL_IOCTL_DMABUF_SYNC VIRTWL_IOR(0x03, \
+ struct virtwl_ioctl_dmabuf_sync)
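+
+/*
+ * Illustrative sketch (not part of the original header): sending a small
+ * payload with no file descriptors over a context fd obtained from
+ * VIRTWL_IOCTL_NEW. "ctx_fd" and "payload" are hypothetical; error handling
+ * is omitted.
+ *
+ *	struct virtwl_ioctl_txn *txn = malloc(sizeof(*txn) + 16);
+ *	int i;
+ *
+ *	for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++)
+ *		txn->fds[i] = -1;	// unused fd slots stay negative
+ *	txn->len = 16;
+ *	memcpy(txn->data, payload, 16);
+ *	ioctl(ctx_fd, VIRTWL_IOCTL_SEND, txn);
+ */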
+
+#endif /* _LINUX_VIRTWL_H */