From 86d17c869fded7817529b2cbcc81c83e9711b323 Mon Sep 17 00:00:00 2001
From: Weifeng Liu
Date: Tue, 10 Dec 2024 05:29:07 +0000
Subject: [PATCH] drm/virtio: Send flip event based on backend setting

Cache out-fences in atomic commit and signal them when the backend asks
to.  The backend is expected to tell the frontend how many fences to
signal whenever it presents a frame to the user; only in this way can
the semantics of the present fence be preserved.

Tracked-On: OAM-128370
Signed-off-by: hangliu1
Signed-off-by: Weifeng Liu
---
 drivers/gpu/drm/virtio/virtgpu_display.c | 35 +++++++++++++++++++++++--------
 drivers/gpu/drm/virtio/virtgpu_drv.h     |  4 ++++
 drivers/gpu/drm/virtio/virtgpu_vq.c      | 19 ++++++++++++++++++-
 3 files changed, 49 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 39e4c52a0d0d..016fc78ed18a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -161,18 +161,37 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 	struct drm_device *drm = crtc->dev;
 	struct virtio_gpu_device *vgdev = drm->dev_private;
+	int i;
 
-	if(vgdev->has_vblank) {
-		if (crtc->state->event) {
-			spin_lock_irq(&drm->event_lock);
-			if (drm_crtc_vblank_get(crtc) != 0)
-				drm_crtc_send_vblank_event(crtc, crtc->state->event);
-			else
-				drm_crtc_arm_vblank_event(crtc, crtc->state->event);
-			spin_unlock_irq(&drm->event_lock);
+	if (vgdev->has_vblank && crtc->state->event) {
+		spin_lock_irq(&drm->event_lock);
+		if (drm_crtc_vblank_get(crtc) != 0) {
+			/* Cannot enable vblank; send the event right now. */
+			drm_crtc_send_vblank_event(crtc, crtc->state->event);
+			crtc->state->event = NULL;
+		} else {
+			for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++) {
+				if (vgdev->cache_event[i] == NULL) {
+					break;
+				}
+			}
+			if (i == VBLANK_EVENT_CACHE_SIZE) {
+				/* The cache is full: drain it now, or the
+				 * system hangs waiting on flip events. */
+				for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++) {
+					if (vgdev->cache_event[i] != NULL) {
+						drm_crtc_send_vblank_event(crtc, vgdev->cache_event[i]);
+						vgdev->cache_event[i] = NULL;
+					}
+				}
+				i = 0;
+			}
+			vgdev->cache_event[i] = crtc->state->event;
 			crtc->state->event = NULL;
 		}
+		spin_unlock_irq(&drm->event_lock);
 	}
+
 	if(vgdev->has_multi_plane)
 		virtio_gpu_resource_flush_sync(crtc);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae6aa4454a86..e34cd78dacc5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -180,6 +180,9 @@ struct virtio_gpu_vbuffer {
 #define VIRTIO_GPU_MAX_PLANES 6
 /*hardcode igpu scaler number ver>11 */
 #define SKL_NUM_SCALERS 2
+
+#define VBLANK_EVENT_CACHE_SIZE 3
+
 struct virtio_gpu_output {
 	int index;
 	struct drm_crtc crtc;
@@ -238,6 +241,7 @@ struct virtio_gpu_device {
 	struct virtio_device *vdev;
 
 	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
+	struct drm_pending_vblank_event *cache_event[VBLANK_EVENT_CACHE_SIZE];
 	uint32_t num_scanouts;
 	uint32_t num_vblankq;
 	struct virtio_gpu_queue ctrlq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48abdbef42dd..657e8f82fe75 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -100,7 +100,7 @@ void virtio_gpu_vblank_ack(struct virtqueue *vq)
 	unsigned long irqflags;
 	unsigned int len;
 	unsigned int *ret_value;
-	int target = 0;
+	int target = 0, i, curr = 0;
 	while((target < vgdev->num_vblankq) && (vgdev->vblank[target].vblank.vq != vq)) {
 		target++;
 	}
@@ -115,6 +115,24 @@ void virtio_gpu_vblank_ack(struct virtqueue *vq)
 	spin_unlock_irqrestore(&vgdev->vblank[target].vblank.qlock, irqflags);
 
 	drm_handle_vblank(dev, target);
+	spin_lock_irqsave(&dev->event_lock, irqflags);
+	/* ret_value tells how many cached events the backend presented. */
+	for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++) {
+		if (vgdev->cache_event[i] && i < *ret_value) {
+			drm_crtc_send_vblank_event(&vgdev->outputs[target].crtc,
+						   vgdev->cache_event[i]);
+			vgdev->cache_event[i] = NULL;
+		}
+	}
+
+	/* Compact survivors to the front; clear the stale tail slots. */
+	for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++)
+		if (vgdev->cache_event[i])
+			vgdev->cache_event[curr++] = vgdev->cache_event[i];
+	for (i = curr; i < VBLANK_EVENT_CACHE_SIZE; i++)
+		vgdev->cache_event[i] = NULL;
+
+	spin_unlock_irqrestore(&dev->event_lock, irqflags);
 }
 
 void virtio_gpu_cursor_ack(struct virtqueue *vq)
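
For review purposes, a minimal userspace model of the caching scheme in
this patch; a sketch only, not part of the change.  cache_push() and
cache_signal() are invented names standing in for the event-cache logic
in virtio_gpu_crtc_atomic_flush() and virtio_gpu_vblank_ack() (with
*ret_value == n), and plain ints stand in for the
struct drm_pending_vblank_event pointers:

	#include <stdio.h>

	#define VBLANK_EVENT_CACHE_SIZE 3

	static int cache[VBLANK_EVENT_CACHE_SIZE];	/* 0 == empty slot */

	/* Mirrors the atomic-flush path: queue an event in the first free
	 * slot, draining the whole cache first if it is full. */
	static void cache_push(int ev)
	{
		int i;

		for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++)
			if (!cache[i])
				break;
		if (i == VBLANK_EVENT_CACHE_SIZE) {
			for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++) {
				printf("signal %d\n", cache[i]);
				cache[i] = 0;
			}
			i = 0;
		}
		cache[i] = ev;
	}

	/* Mirrors the vblank-ack path: signal the n oldest events, then
	 * compact the survivors and clear the stale tail slots. */
	static void cache_signal(int n)
	{
		int i, curr = 0;

		for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++) {
			if (cache[i] && i < n) {
				printf("signal %d\n", cache[i]);
				cache[i] = 0;
			}
		}
		for (i = 0; i < VBLANK_EVENT_CACHE_SIZE; i++)
			if (cache[i])
				cache[curr++] = cache[i];
		for (i = curr; i < VBLANK_EVENT_CACHE_SIZE; i++)
			cache[i] = 0;
	}

	int main(void)
	{
		cache_push(1);
		cache_push(2);
		cache_signal(1);	/* backend presented one frame: "signal 1" */
		cache_signal(1);	/* now "signal 2" */
		return 0;
	}

Without the tail-clearing loop in cache_signal(), the slot an event was
compacted out of would keep its old pointer and the same event could be
sent twice on the next ack; that is what the extra loop in
virtio_gpu_vblank_ack() guards against.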