1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <wayland-client.h>
25 
26 #include <assert.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <string.h>
32 #include <pthread.h>
33 #include <poll.h>
34 #include <sys/mman.h>
35 #include <sys/types.h>
36 
37 #include "drm-uapi/drm_fourcc.h"
38 
39 #include "vk_instance.h"
40 #include "vk_physical_device.h"
41 #include "vk_util.h"
42 #include "wsi_common_entrypoints.h"
43 #include "wsi_common_private.h"
44 #include "linux-dmabuf-unstable-v1-client-protocol.h"
45 #include "presentation-time-client-protocol.h"
46 #include "tearing-control-v1-client-protocol.h"
47 
48 #include <util/compiler.h>
49 #include <util/hash_table.h>
50 #include <util/timespec.h>
51 #include <util/u_endian.h>
52 #include <util/u_vector.h>
53 #include <util/u_dynarray.h>
54 #include <util/anon_file.h>
55 #include <util/os_time.h>
56 
57 #ifdef MAJOR_IN_MKDEV
58 #include <sys/mkdev.h>
59 #endif
60 #ifdef MAJOR_IN_SYSMACROS
61 #include <sys/sysmacros.h>
62 #endif
63 
64 struct wsi_wayland;
65 
/* One Vulkan format the display can present, together with the DRM format
 * modifiers advertised for it and whether it maps to an alpha and/or opaque
 * Wayland buffer format. */
struct wsi_wl_format {
   VkFormat vk_format;
   uint32_t flags;              /* bitmask of enum wsi_wl_fmt_flag */
   struct u_vector modifiers;   /* vector of uint64_t DRM format modifiers */
};
71 
/* Memory-mapped copy of the compositor's dma-buf feedback format table:
 * a packed array of 16-byte (format, modifier) entries that tranche events
 * reference by index. */
struct dmabuf_feedback_format_table {
   unsigned int size;   /* size of the mapping in bytes */
   struct {
      uint32_t format;
      uint32_t padding; /* unused */
      uint64_t modifier;
   } *data;
};
80 
/* One preference tranche received through zwp_linux_dmabuf_feedback_v1. */
struct dmabuf_feedback_tranche {
   dev_t target_device;       /* device this tranche targets */
   uint32_t flags;            /* tranche flags from the protocol event */
   struct u_vector formats;   /* vector of struct wsi_wl_format */
};
86 
/* Accumulated state of one dma-buf feedback object. */
struct dmabuf_feedback {
   dev_t main_device;
   struct dmabuf_feedback_format_table format_table;
   struct util_dynarray tranches;                    /* completed tranches */
   struct dmabuf_feedback_tranche pending_tranche;   /* tranche being filled in */
};
93 
/* Per-wl_display WSI state: the connection, the globals we bound, and the
 * set of presentable formats discovered from them. */
struct wsi_wl_display {
   /* The real wl_display */
   struct wl_display *wl_display;
   /* Actually a proxy wrapper around the event queue */
   struct wl_display *wl_display_wrapper;
   struct wl_event_queue *queue;

   struct wl_shm *wl_shm;
   struct zwp_linux_dmabuf_v1 *wl_dmabuf;
   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct wp_tearing_control_manager_v1 *tearing_control_manager;

   struct dmabuf_feedback_format_table format_table;

   /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
   struct wp_presentation *wp_presentation_notwrapped;

   struct wsi_wayland *wsi_wl;

   /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
   struct u_vector formats;

   /* True when using software (wl_shm) presentation instead of dma-buf */
   bool sw;

   dev_t main_device;   /* compositor's main device from dma-buf feedback */
   bool same_gpu;
};
121 
/* The Wayland WSI interface instance registered with the wsi_device. */
struct wsi_wayland {
   struct wsi_interface base;

   struct wsi_device *wsi;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;
};
130 
/* One swapchain image plus its wl_buffer and, for SHM paths, the backing
 * shared-memory mapping. */
struct wsi_wl_image {
   struct wsi_image base;
   struct wl_buffer *buffer;
   bool busy;             /* buffer currently held by the compositor */
   int shm_fd;            /* only used by the SHM buffer types */
   void *shm_ptr;
   unsigned shm_size;
};
139 
/* How swapchain images are turned into wl_buffers. */
enum wsi_wl_buffer_type {
   WSI_WL_BUFFER_NATIVE,       /* dma-buf import */
   WSI_WL_BUFFER_GPU_SHM,      /* GPU renders into shared memory */
   WSI_WL_BUFFER_SHM_MEMCPY,   /* copy from GPU image into shared memory */
};
145 
/* Per-VkSurfaceKHR state layered on the ICD surface. */
struct wsi_wl_surface {
   VkIcdSurfaceWayland base;

   struct wsi_wl_swapchain *chain;   /* currently bound swapchain, if any */
   struct wl_surface *surface;
   struct wsi_wl_display *display;

   /* Per-surface dma-buf feedback; pending_dmabuf_feedback collects events
    * until a done event promotes it to dmabuf_feedback. */
   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;
};
156 
157 struct wsi_wl_swapchain {
158    struct wsi_swapchain base;
159 
160    struct wsi_wl_surface *wsi_wl_surface;
161    struct wp_tearing_control_v1 *tearing_control;
162 
163    struct wl_callback *frame;
164 
165    VkExtent2D extent;
166    VkFormat vk_format;
167    enum wsi_wl_buffer_type buffer_type;
168    uint32_t drm_format;
169    enum wl_shm_format shm_format;
170 
171    bool suboptimal;
172 
173    uint32_t num_drm_modifiers;
174    const uint64_t *drm_modifiers;
175 
176    VkPresentModeKHR present_mode;
177    bool fifo_ready;
178 
179    struct {
180       pthread_mutex_t lock; /* protects all members */
181       uint64_t max_completed;
182       struct wl_list outstanding_list;
183       pthread_cond_t list_advanced;
184       struct wl_event_queue *queue;
185       struct wp_presentation *wp_presentation;
186       bool dispatch_in_progress;
187    } present_ids;
188 
189    struct wsi_wl_image images[0];
190 };
191 VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
192                                VK_OBJECT_TYPE_SWAPCHAIN_KHR)
193 
/* Whether a wsi_wl_format maps to an alpha-carrying and/or an opaque
 * Wayland buffer format (a format may support both). */
enum wsi_wl_fmt_flag {
   WSI_WL_FMT_ALPHA = 1 << 0,
   WSI_WL_FMT_OPAQUE = 1 << 1,
};
198 
199 static struct wsi_wl_format *
find_format(struct u_vector * formats,VkFormat format)200 find_format(struct u_vector *formats, VkFormat format)
201 {
202    struct wsi_wl_format *f;
203 
204    u_vector_foreach(f, formats)
205       if (f->vk_format == format)
206          return f;
207 
208    return NULL;
209 }
210 
211 static struct wsi_wl_format *
wsi_wl_display_add_vk_format(struct wsi_wl_display * display,struct u_vector * formats,VkFormat format,uint32_t flags)212 wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
213                              struct u_vector *formats,
214                              VkFormat format, uint32_t flags)
215 {
216    assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));
217 
218    /* Don't add a format that's already in the list */
219    struct wsi_wl_format *f = find_format(formats, format);
220    if (f) {
221       f->flags |= flags;
222       return f;
223    }
224 
225    /* Don't add formats that aren't renderable. */
226    VkFormatProperties props;
227 
228    display->wsi_wl->wsi->GetPhysicalDeviceFormatProperties(display->wsi_wl->physical_device,
229                                                            format, &props);
230    if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
231       return NULL;
232 
233    struct u_vector modifiers;
234    if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
235       return NULL;
236 
237    f = u_vector_add(formats);
238    if (!f) {
239       u_vector_finish(&modifiers);
240       return NULL;
241    }
242 
243    f->vk_format = format;
244    f->flags = flags;
245    f->modifiers = modifiers;
246 
247    return f;
248 }
249 
250 static void
wsi_wl_format_add_modifier(struct wsi_wl_format * format,uint64_t modifier)251 wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
252 {
253    uint64_t *mod;
254 
255    if (modifier == DRM_FORMAT_MOD_INVALID)
256       return;
257 
258    u_vector_foreach(mod, &format->modifiers)
259       if (*mod == modifier)
260          return;
261 
262    mod = u_vector_add(&format->modifiers);
263    if (mod)
264       *mod = modifier;
265 }
266 
267 static void
wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display * display,struct u_vector * formats,VkFormat vk_format,uint32_t flags,uint64_t modifier)268 wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
269                                       struct u_vector *formats,
270                                       VkFormat vk_format, uint32_t flags,
271                                       uint64_t modifier)
272 {
273    struct wsi_wl_format *format;
274 
275    format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
276    if (format)
277       wsi_wl_format_add_modifier(format, modifier);
278 }
279 
/* Translate @drm_format into the Vulkan format(s) it can back and register
 * each with @modifier.  X-channel DRM formats map to the alpha-capable
 * Vulkan format as OPAQUE; unknown formats are silently ignored. */
static void
wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
                                       struct u_vector *formats,
                                       uint32_t drm_format, uint64_t modifier)
{
   switch (drm_format) {
#if 0
   /* TODO: These are only available when VK_EXT_4444_formats is enabled, so
    * we probably need to make their use conditional on this extension. */
   case DRM_FORMAT_ARGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Vulkan _PACKN formats have the same component order as DRM formats
    * on little endian systems, on big endian there exists no analog. */
#if UTIL_ARCH_LITTLE_ENDIAN
   case DRM_FORMAT_RGBA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   /* 565 formats have no alpha channel at all, so they serve both the
    * alpha and opaque composite-alpha modes. */
   case DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G6B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G6R5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_ARGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGBA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;

   /* Vulkan 16-bits-per-channel formats have an inverted channel order
    * compared to DRM formats, just like the 8-bits-per-channel ones.
    * On little endian systems the memory representation of each channel
    * matches the DRM formats'. */
   case DRM_FORMAT_ABGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Non-packed 8-bit formats have an inverted channel order compared to the
    * little endian DRM formats, because the DRM channel ordering is high->low
    * but the vulkan channel ordering is in memory byte order
    *
    * For all UNORM formats which have a SRGB variant, we must support both if
    * we can. SRGB in this context means that rendering to it will result in a
    * linear -> nonlinear SRGB colorspace conversion before the data is stored.
    * The inverse function is applied when sampling from SRGB images.
    * From Wayland's perspective nothing changes, the difference is just how
    * Vulkan interprets the pixel data. */
   case DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   }
}
483 
484 static uint32_t
drm_format_for_wl_shm_format(enum wl_shm_format shm_format)485 drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
486 {
487    /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
488    switch (shm_format) {
489    case WL_SHM_FORMAT_ARGB8888:
490       return DRM_FORMAT_ARGB8888;
491    case WL_SHM_FORMAT_XRGB8888:
492       return DRM_FORMAT_XRGB8888;
493    default:
494       return shm_format;
495    }
496 }
497 
498 static void
wsi_wl_display_add_wl_shm_format(struct wsi_wl_display * display,struct u_vector * formats,enum wl_shm_format shm_format)499 wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
500                                  struct u_vector *formats,
501                                  enum wl_shm_format shm_format)
502 {
503    uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);
504 
505    wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
506                                           DRM_FORMAT_MOD_INVALID);
507 }
508 
/* Map a presentable Vulkan format to the DRM fourcc used on the wire;
 * @alpha selects the alpha-carrying variant over the X (opaque) one.
 * Asserts (and returns DRM_FORMAT_INVALID) for formats this backend never
 * advertises. */
static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
#if 0
   case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
   case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
#endif
   /* Packed formats only match DRM component order on little endian;
    * see the comments in wsi_wl_display_add_drm_format_modifier(). */
#if UTIL_ARCH_LITTLE_ENDIAN
   case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
   case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA4444 : DRM_FORMAT_BGRX4444;
   case VK_FORMAT_R5G6B5_UNORM_PACK16:
      return DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G6R5_UNORM_PACK16:
      return DRM_FORMAT_BGR565;
   case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB1555 : DRM_FORMAT_XRGB1555;
   case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA5551 : DRM_FORMAT_RGBX5551;
   case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA5551 : DRM_FORMAT_BGRX5551;
   case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
   case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_R16G16B16A16_UNORM:
      return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
   case VK_FORMAT_R16G16B16A16_SFLOAT:
      return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
#endif
   case VK_FORMAT_R8G8B8_UNORM:
   case VK_FORMAT_R8G8B8_SRGB:
      return DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
   case VK_FORMAT_R8G8B8A8_SRGB:
      return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;

   default:
      assert(!"Unsupported Vulkan format");
      return DRM_FORMAT_INVALID;
   }
}
561 
562 static enum wl_shm_format
wl_shm_format_for_vk_format(VkFormat vk_format,bool alpha)563 wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
564 {
565    uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
566    if (drm_format == DRM_FORMAT_INVALID) {
567       return 0;
568    }
569 
570    /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
571    switch (drm_format) {
572    case DRM_FORMAT_ARGB8888:
573       return WL_SHM_FORMAT_ARGB8888;
574    case DRM_FORMAT_XRGB8888:
575       return WL_SHM_FORMAT_XRGB8888;
576    default:
577       return drm_format;
578    }
579 }
580 
/* zwp_linux_dmabuf_v1.format handler — intentionally a no-op. */
static void
dmabuf_handle_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                     uint32_t format)
{
   /* Formats are implicitly advertised by the modifier event, so we ignore
    * them here. */
}
588 
589 static void
dmabuf_handle_modifier(void * data,struct zwp_linux_dmabuf_v1 * dmabuf,uint32_t format,uint32_t modifier_hi,uint32_t modifier_lo)590 dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
591                        uint32_t format, uint32_t modifier_hi,
592                        uint32_t modifier_lo)
593 {
594    struct wsi_wl_display *display = data;
595    uint64_t modifier;
596 
597    /* Ignore this if the compositor advertised dma-buf feedback. From version 4
598     * onwards (when dma-buf feedback was introduced), the compositor should not
599     * advertise this event anymore, but let's keep this for safety. */
600    if (display->wl_dmabuf_feedback)
601       return;
602 
603    modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
604    wsi_wl_display_add_drm_format_modifier(display, &display->formats,
605                                           format, modifier);
606 }
607 
608 static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
609    dmabuf_handle_format,
610    dmabuf_handle_modifier,
611 };
612 
613 static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table * format_table)614 dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
615 {
616    if (format_table->data && format_table->data != MAP_FAILED)
617       munmap(format_table->data, format_table->size);
618 }
619 
/* Reset a format table to the unmapped state (data == NULL, size == 0). */
static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
{
   memset(format_table, 0, sizeof(*format_table));
}
625 
/* Free a tranche: each per-format modifier vector first, then the format
 * vector itself. */
static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   struct wsi_wl_format *format;

   u_vector_foreach(format, &tranche->formats)
      u_vector_finish(&format->modifiers);

   u_vector_finish(&tranche->formats);
}
636 
637 static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche * tranche)638 dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
639 {
640    memset(tranche, 0, sizeof(*tranche));
641 
642    if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
643       return -1;
644 
645    return 0;
646 }
647 
/* Release all state held by a dmabuf_feedback: the in-progress tranche,
 * every completed tranche, and the mapped format table. */
static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach(&dmabuf_feedback->tranches,
                         struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}
660 
661 static int
dmabuf_feedback_init(struct dmabuf_feedback * dmabuf_feedback)662 dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
663 {
664    memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
665 
666    if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
667       return -1;
668 
669    util_dynarray_init(&dmabuf_feedback->tranches, NULL);
670 
671    dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
672 
673    return 0;
674 }
675 
676 static void
default_dmabuf_feedback_format_table(void * data,struct zwp_linux_dmabuf_feedback_v1 * zwp_linux_dmabuf_feedback_v1,int32_t fd,uint32_t size)677 default_dmabuf_feedback_format_table(void *data,
678                                      struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
679                                      int32_t fd, uint32_t size)
680 {
681    struct wsi_wl_display *display = data;
682 
683    display->format_table.size = size;
684    display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
685 
686    close(fd);
687 }
688 
/* main_device event: remember the compositor's main device (a dev_t sent
 * as a raw byte array). */
static void
default_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_display *display = data;

   assert(device->size == sizeof(dev_t));
   memcpy(&display->main_device, device->data, device->size);
}
699 
/* tranche_target_device on the default (per-display) feedback object —
 * only the per-surface feedback cares about tranche structure. */
static void
default_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   /* ignore this event */
}
707 
/* tranche_flags on the default (per-display) feedback object — unused. */
static void
default_dmabuf_feedback_tranche_flags(void *data,
                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                      uint32_t flags)
{
   /* ignore this event */
}
715 
716 static void
default_dmabuf_feedback_tranche_formats(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * indices)717 default_dmabuf_feedback_tranche_formats(void *data,
718                                         struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
719                                         struct wl_array *indices)
720 {
721    struct wsi_wl_display *display = data;
722    uint32_t format;
723    uint64_t modifier;
724    uint16_t *index;
725 
726    /* We couldn't map the format table or the compositor didn't advertise it,
727     * so we have to ignore the feedback. */
728    if (display->format_table.data == MAP_FAILED ||
729        display->format_table.data == NULL)
730       return;
731 
732    wl_array_for_each(index, indices) {
733       format = display->format_table.data[*index].format;
734       modifier = display->format_table.data[*index].modifier;
735       wsi_wl_display_add_drm_format_modifier(display, &display->formats,
736                                              format, modifier);
737    }
738 }
739 
/* tranche_done on the default (per-display) feedback object — tranche
 * boundaries are irrelevant here since all formats go into one set. */
static void
default_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}
746 
/* done on the default (per-display) feedback object — nothing to commit. */
static void
default_dmabuf_feedback_done(void *data,
                             struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}
753 
/* Listener for the display-wide (default) dma-buf feedback object; only
 * format_table, main_device and tranche_formats carry state we keep. */
static const struct zwp_linux_dmabuf_feedback_v1_listener
dmabuf_feedback_listener = {
   .format_table = default_dmabuf_feedback_format_table,
   .main_device = default_dmabuf_feedback_main_device,
   .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
   .tranche_flags = default_dmabuf_feedback_tranche_flags,
   .tranche_formats = default_dmabuf_feedback_tranche_formats,
   .tranche_done = default_dmabuf_feedback_tranche_done,
   .done = default_dmabuf_feedback_done,
};
764 
765 static void
shm_handle_format(void * data,struct wl_shm * shm,uint32_t format)766 shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
767 {
768    struct wsi_wl_display *display = data;
769 
770    wsi_wl_display_add_wl_shm_format(display, &display->formats, format);
771 }
772 
/* wl_shm only has a single event (format advertisement). */
static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format
};
776 
777 static void
registry_handle_global(void * data,struct wl_registry * registry,uint32_t name,const char * interface,uint32_t version)778 registry_handle_global(void *data, struct wl_registry *registry,
779                        uint32_t name, const char *interface, uint32_t version)
780 {
781    struct wsi_wl_display *display = data;
782 
783    if (display->sw) {
784       if (strcmp(interface, wl_shm_interface.name) == 0) {
785          display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
786          wl_shm_add_listener(display->wl_shm, &shm_listener, display);
787       }
788    } else {
789       if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
790          display->wl_dmabuf =
791             wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
792                              MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
793          zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
794                                           &dmabuf_listener, display);
795       }
796    }
797 
798    if (strcmp(interface, wp_presentation_interface.name) == 0) {
799       display->wp_presentation_notwrapped =
800          wl_registry_bind(registry, name, &wp_presentation_interface, 1);
801    } else if (strcmp(interface, wp_tearing_control_v1_interface.name) == 0) {
802       display->tearing_control_manager =
803          wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
804    }
805 }
806 
static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{
   /* Global removal is intentionally ignored. */
}
811 
812 static const struct wl_registry_listener registry_listener = {
813    registry_handle_global,
814    registry_handle_global_remove
815 };
816 
/* Release everything wsi_wl_display_init() acquired.  Safe to call on a
 * partially initialized display: each proxy below is NULL-checked before
 * destruction, and the formats vector is always initialized first. */
static void
wsi_wl_display_finish(struct wsi_wl_display *display)
{
   struct wsi_wl_format *f;
   /* Each format owns its own vector of modifiers; free those before
    * freeing the containing vector. */
   u_vector_foreach(f, &display->formats)
      u_vector_finish(&f->modifiers);
   u_vector_finish(&display->formats);
   if (display->wl_shm)
      wl_shm_destroy(display->wl_shm);
   if (display->wl_dmabuf)
      zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
   if (display->wp_presentation_notwrapped)
      wp_presentation_destroy(display->wp_presentation_notwrapped);
   if (display->tearing_control_manager)
      wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
   /* Destroy the display wrapper before the event queue it was assigned to. */
   if (display->wl_display_wrapper)
      wl_proxy_wrapper_destroy(display->wl_display_wrapper);
   if (display->queue)
      wl_event_queue_destroy(display->queue);
}
837 
/* Initialize 'display' (caller-provided storage) against 'wl_display'.
 * Binds the required globals on a private event queue; when
 * 'get_format_list' is set, additionally queries the supported formats and
 * modifiers (via default dma-buf feedback when available, otherwise via the
 * plain dmabuf/shm listeners).  On failure, everything acquired so far is
 * released and 'display' is left finished. */
static VkResult
wsi_wl_display_init(struct wsi_wayland *wsi_wl,
                    struct wsi_wl_display *display,
                    struct wl_display *wl_display,
                    bool get_format_list, bool sw)
{
   VkResult result = VK_SUCCESS;
   memset(display, 0, sizeof(*display));

   if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   display->wsi_wl = wsi_wl;
   display->wl_display = wl_display;
   display->sw = sw;

   /* A private queue keeps our round-trips from dispatching events that
    * belong to the application's own use of the wl_display. */
   display->queue = wl_display_create_queue(wl_display);
   if (!display->queue) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   /* Proxy wrapper so that objects created from the display (the registry
    * below) are attached to our private queue. */
   display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
   if (!display->wl_display_wrapper) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
                      display->queue);

   struct wl_registry *registry =
      wl_display_get_registry(display->wl_display_wrapper);
   if (!registry) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get wl_shm and zwp_linux_dmabuf_v1 globals */
   /* NOTE(review): wl_display_roundtrip_queue() returns -1 on a display
    * error; none of the three round-trips below check for that — confirm
    * whether a failure here should map to VK_ERROR_SURFACE_LOST_KHR. */
   wl_display_roundtrip_queue(display->wl_display, display->queue);
   if (!display->wl_dmabuf && !display->wl_shm) {
      result = VK_ERROR_SURFACE_LOST_KHR;
      goto fail_registry;
   }

   /* Caller doesn't expect us to query formats/modifiers, so return */
   if (!get_format_list)
      goto out;

   /* Default assumption */
   display->same_gpu = true;

   /* Get the default dma-buf feedback */
   if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
                             ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
         dmabuf_feedback_format_table_init(&display->format_table);
         display->wl_dmabuf_feedback =
            zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
         zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
                                                   &dmabuf_feedback_listener, display);

         /* Round-trip again to fetch dma-buf feedback */
         wl_display_roundtrip_queue(display->wl_display, display->queue);

         if (wsi_wl->wsi->drm_info.hasRender ||
             wsi_wl->wsi->drm_info.hasPrimary) {
            /* Apparently some wayland compositor do not send the render
             * device node but the primary, so test against both.
             */
            display->same_gpu =
               (wsi_wl->wsi->drm_info.hasRender &&
                major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
                minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
               (wsi_wl->wsi->drm_info.hasPrimary &&
                major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
                minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
         }
   }

   /* Round-trip again to get formats and modifiers */
   wl_display_roundtrip_queue(display->wl_display, display->queue);

   if (wsi_wl->wsi->force_bgra8_unorm_first) {
      /* Find BGRA8_UNORM in the list and swap it to the first position if we
       * can find it.  Some apps get confused if SRGB is first in the list.
       */
      struct wsi_wl_format *first_fmt = u_vector_head(&display->formats);
      struct wsi_wl_format *f, tmp_fmt;
      f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
      if (f) {
         tmp_fmt = *f;
         *f = *first_fmt;
         *first_fmt = tmp_fmt;
      }
   }

out:
   /* We don't need this anymore */
   wl_registry_destroy(registry);

   /* Destroy default dma-buf feedback object and format table */
   if (display->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
      display->wl_dmabuf_feedback = NULL;
      dmabuf_feedback_format_table_fini(&display->format_table);
   }

   return VK_SUCCESS;

fail_registry:
   if (registry)
      wl_registry_destroy(registry);

fail:
   /* Tolerates partial initialization (see wsi_wl_display_finish). */
   wsi_wl_display_finish(display);
   return result;
}
957 
958 static VkResult
wsi_wl_display_create(struct wsi_wayland * wsi,struct wl_display * wl_display,bool sw,struct wsi_wl_display ** display_out)959 wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
960                       bool sw,
961                       struct wsi_wl_display **display_out)
962 {
963    struct wsi_wl_display *display =
964       vk_alloc(wsi->alloc, sizeof(*display), 8,
965                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
966    if (!display)
967       return VK_ERROR_OUT_OF_HOST_MEMORY;
968 
969    VkResult result = wsi_wl_display_init(wsi, display, wl_display, true,
970                                          sw);
971    if (result != VK_SUCCESS) {
972       vk_free(wsi->alloc, display);
973       return result;
974    }
975 
976    *display_out = display;
977 
978    return result;
979 }
980 
981 static void
wsi_wl_display_destroy(struct wsi_wl_display * display)982 wsi_wl_display_destroy(struct wsi_wl_display *display)
983 {
984    struct wsi_wayland *wsi = display->wsi_wl;
985    wsi_wl_display_finish(display);
986    vk_free(wsi->alloc, display);
987 }
988 
989 VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,uint32_t queueFamilyIndex,struct wl_display * wl_display)990 wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
991                                                    uint32_t queueFamilyIndex,
992                                                    struct wl_display *wl_display)
993 {
994    VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
995    struct wsi_device *wsi_device = pdevice->wsi_device;
996    struct wsi_wayland *wsi =
997       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
998 
999    struct wsi_wl_display display;
1000    VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
1001                                       wsi_device->sw);
1002    if (ret == VK_SUCCESS)
1003       wsi_wl_display_finish(&display);
1004 
1005    return ret == VK_SUCCESS;
1006 }
1007 
1008 static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase * surface,struct wsi_device * wsi_device,uint32_t queueFamilyIndex,VkBool32 * pSupported)1009 wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
1010                            struct wsi_device *wsi_device,
1011                            uint32_t queueFamilyIndex,
1012                            VkBool32* pSupported)
1013 {
1014    *pSupported = true;
1015 
1016    return VK_SUCCESS;
1017 }
1018 
1019 static uint32_t
wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT * present_mode)1020 wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT *present_mode)
1021 {
1022    if (present_mode && (present_mode->presentMode == VK_PRESENT_MODE_FIFO_KHR ||
1023                         present_mode->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)) {
1024       /* If we receive a FIFO present mode, only 2 images is required for forward progress.
1025        * Performance with 2 images will be questionable, but we only allow it for applications
1026        * using the new API, so we don't risk breaking any existing apps this way.
1027        * Other ICDs expose 2 images here already. */
1028        return 2;
1029    } else {
1030       /* For true mailbox mode, we need at least 4 images:
1031        *  1) One to scan out from
1032        *  2) One to have queued for scan-out
1033        *  3) One to be currently held by the Wayland compositor
1034        *  4) One to render to
1035        */
1036       return 4;
1037    }
1038 }
1039 
1040 static uint32_t
wsi_wl_surface_get_min_image_count_for_mode_group(const VkSwapchainPresentModesCreateInfoEXT * modes)1041 wsi_wl_surface_get_min_image_count_for_mode_group(const VkSwapchainPresentModesCreateInfoEXT *modes)
1042 {
1043    /* If we don't provide the PresentModeCreateInfo struct, we must be backwards compatible,
1044     * and assume that minImageCount is the default one, i.e. 4, which supports both FIFO and MAILBOX. */
1045    if (!modes) {
1046       return wsi_wl_surface_get_min_image_count(NULL);
1047    }
1048 
1049    uint32_t max_required = 0;
1050    for (uint32_t i = 0; i < modes->presentModeCount; i++) {
1051       const VkSurfacePresentModeEXT mode = {
1052          VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT,
1053          NULL,
1054          modes->pPresentModes[i]
1055       };
1056       max_required = MAX2(max_required, wsi_wl_surface_get_min_image_count(&mode));
1057    }
1058 
1059    return max_required;
1060 }
1061 
1062 static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase * surface,struct wsi_device * wsi_device,const VkSurfacePresentModeEXT * present_mode,VkSurfaceCapabilitiesKHR * caps)1063 wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
1064                                 struct wsi_device *wsi_device,
1065                                 const VkSurfacePresentModeEXT *present_mode,
1066                                 VkSurfaceCapabilitiesKHR* caps)
1067 {
1068    caps->minImageCount = wsi_wl_surface_get_min_image_count(present_mode);
1069    /* There is no real maximum */
1070    caps->maxImageCount = 0;
1071 
1072    caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
1073    caps->minImageExtent = (VkExtent2D) { 1, 1 };
1074    caps->maxImageExtent = (VkExtent2D) {
1075       wsi_device->maxImageDimension2D,
1076       wsi_device->maxImageDimension2D,
1077    };
1078 
1079    caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
1080    caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
1081    caps->maxImageArrayLayers = 1;
1082 
1083    caps->supportedCompositeAlpha =
1084       VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
1085       VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
1086 
1087    caps->supportedUsageFlags =
1088       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1089       VK_IMAGE_USAGE_SAMPLED_BIT |
1090       VK_IMAGE_USAGE_TRANSFER_DST_BIT |
1091       VK_IMAGE_USAGE_STORAGE_BIT |
1092       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
1093       VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1094 
1095    VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
1096    if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
1097       caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
1098 
1099    return VK_SUCCESS;
1100 }
1101 
/* Extended capabilities query: fills the base capabilities and then walks
 * the caller's pNext chain, answering the extension structs we know about
 * (protected content, present scaling, present-mode compatibility). */
static VkResult
wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                 struct wsi_device *wsi_device,
                                 const void *info_next,
                                 VkSurfaceCapabilities2KHR* caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   /* Optional query-time present mode; it influences minImageCount and the
    * compatibility list below. */
   const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);

   VkResult result =
      wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
                                      &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         /* Protected content is not supported here. */
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
         /* Unsupported. */
         VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
         scaling->supportedPresentScaling = 0;
         scaling->supportedPresentGravityX = 0;
         scaling->supportedPresentGravityY = 0;
         scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
         scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
         /* Can easily toggle between FIFO and MAILBOX on Wayland. */
         VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
         if (compat->pPresentModes) {
            assert(present_mode);
            VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
            /* Must always return queried present mode even when truncating. */
            vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
               *mode = present_mode->presentMode;
            }
            /* FIFO and MAILBOX are mutually compatible; other modes are
             * only compatible with themselves. */
            switch (present_mode->presentMode) {
            case VK_PRESENT_MODE_MAILBOX_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_FIFO_KHR;
               }
               break;
            case VK_PRESENT_MODE_FIFO_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_MAILBOX_KHR;
               }
               break;
            default:
               break;
            }
         } else {
            /* Count-only query: report how many modes would be returned. */
            if (!present_mode) {
               wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
                                       "without a VkSurfacePresentModeEXT set. This is an "
                                       "application bug.\n");
               compat->presentModeCount = 1;
            } else {
               switch (present_mode->presentMode) {
               case VK_PRESENT_MODE_MAILBOX_KHR:
               case VK_PRESENT_MODE_FIFO_KHR:
                  compat->presentModeCount = 2;
                  break;
               default:
                  compat->presentModeCount = 1;
                  break;
               }
            }
         }
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}
1188 
/* Enumerate surface formats by spinning up a temporary wsi_wl_display
 * (with a full format query) against the surface's wl_display.  Only
 * formats usable with both alpha and opaque composite-alpha are reported,
 * all tagged with the sRGB-nonlinear color space. */
static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct wsi_device *wsi_device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   /* Any non-VK_SUCCESS result (non-zero) is reported as surface loss. */
   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   struct wsi_wl_format *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      /* Skip formats for which we can't support both alpha & opaque
       * formats.
       */
      if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
          !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
         continue;

      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = disp_fmt->vk_format;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   /* VK_INCOMPLETE when the caller's array was too small. */
   return vk_outarray_status(&out);
}
1226 
/* VkSurfaceFormat2KHR variant of wsi_wl_surface_get_formats(); identical
 * logic, writing into the nested surfaceFormat member instead. */
static VkResult
wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                            struct wsi_device *wsi_device,
                            const void *info_next,
                            uint32_t* pSurfaceFormatCount,
                            VkSurfaceFormat2KHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   /* Any non-VK_SUCCESS result (non-zero) is reported as surface loss. */
   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   struct wsi_wl_format *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      /* Skip formats for which we can't support both alpha & opaque
       * formats.
       */
      if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
          !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
         continue;

      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = disp_fmt->vk_format;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   /* VK_INCOMPLETE when the caller's array was too small. */
   return vk_outarray_status(&out);
}
1265 
/* Report supported present modes.  MAILBOX and FIFO are always available;
 * IMMEDIATE is added only when the compositor exposes the tearing-control
 * manager (probed via a temporary display initialization). */
static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   /* Any non-VK_SUCCESS result (non-zero) is reported as surface loss. */
   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw))
      return VK_ERROR_SURFACE_LOST_KHR;

   VkPresentModeKHR present_modes[3];
   uint32_t present_modes_count = 0;

   /* The following two modes are always supported */
   present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
   present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;

   if (display.tearing_control_manager)
      present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;

   assert(present_modes_count <= ARRAY_SIZE(present_modes));
   wsi_wl_display_finish(&display);

   /* Count-only query. */
   if (pPresentModes == NULL) {
      *pPresentModeCount = present_modes_count;
      return VK_SUCCESS;
   }

   /* Copy as many as fit; VK_INCOMPLETE signals truncation. */
   *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < present_modes_count)
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}
1307 
1308 static VkResult
wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase * surface,struct wsi_device * wsi_device,uint32_t * pRectCount,VkRect2D * pRects)1309 wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
1310                                       struct wsi_device *wsi_device,
1311                                       uint32_t* pRectCount,
1312                                       VkRect2D* pRects)
1313 {
1314    VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
1315 
1316    vk_outarray_append_typed(VkRect2D, &out, rect) {
1317       /* We don't know a size so just return the usual "I don't know." */
1318       *rect = (VkRect2D) {
1319          .offset = { 0, 0 },
1320          .extent = { UINT32_MAX, UINT32_MAX },
1321       };
1322    }
1323 
1324    return vk_outarray_status(&out);
1325 }
1326 
/* Destroy a Wayland WSI surface: tear down per-surface dma-buf feedback
 * state, the surface proxy wrapper, the owned display, and finally the
 * surface allocation itself. */
void
wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
                       const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   /* The VkIcdSurfaceWayland is embedded in wsi_wl_surface as 'base';
    * recover the containing struct from the ICD handle. */
   struct wsi_wl_surface *wsi_wl_surface =
      wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);

   /* Current and pending feedback only exist when the feedback object was
    * ever created, so free them together. */
   if (wsi_wl_surface->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
      dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
   }

   if (wsi_wl_surface->surface)
      wl_proxy_wrapper_destroy(wsi_wl_surface->surface);

   if (wsi_wl_surface->display)
      wsi_wl_display_destroy(wsi_wl_surface->display);

   vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
}
1349 
1350 static struct wsi_wl_format *
pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface * wsi_wl_surface,VkFormat vk_format)1351 pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
1352                                          VkFormat vk_format)
1353 {
1354    struct wsi_wl_format *f = NULL;
1355 
1356    /* If the main_device was not advertised, we don't have valid feedback */
1357    if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
1358       return NULL;
1359 
1360    util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
1361                          struct dmabuf_feedback_tranche, tranche) {
1362       f = find_format(&tranche->formats, vk_format);
1363       if (f)
1364          break;
1365    }
1366 
1367    return f;
1368 }
1369 
/* Per-surface feedback: map the compositor-provided (format, modifier)
 * table read-only into the *pending* feedback.  A MAP_FAILED result is
 * tolerated here; consumers (tranche_formats) check for it before
 * dereferencing.  The fd is ours to close once mapped. */
static void
surface_dmabuf_feedback_format_table(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
                                     int32_t fd, uint32_t size)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   feedback->format_table.size = size;
   feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}
1383 
/* Per-surface feedback: record the compositor's main device into the
 * pending feedback.  NOTE(review): assumes device->data holds at least
 * sizeof(main_device) bytes (a dev_t), per the linux-dmabuf protocol —
 * the array size is not validated here. */
static void
surface_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
}
1394 
/* Per-surface feedback: record the target device of the tranche currently
 * being assembled (staged in pending_tranche until tranche_done).
 * NOTE(review): like main_device, assumes the wl_array carries a full
 * dev_t — not validated here. */
static void
surface_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   memcpy(&feedback->pending_tranche.target_device, device->data,
          sizeof(feedback->pending_tranche.target_device));
}
1406 
1407 static void
surface_dmabuf_feedback_tranche_flags(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,uint32_t flags)1408 surface_dmabuf_feedback_tranche_flags(void *data,
1409                                       struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1410                                       uint32_t flags)
1411 {
1412    struct wsi_wl_surface *wsi_wl_surface = data;
1413    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1414 
1415    feedback->pending_tranche.flags = flags;
1416 }
1417 
/* Per-surface feedback: translate format-table indices into (format,
 * modifier) pairs for the tranche currently being assembled. */
static void
surface_dmabuf_feedback_tranche_formats(void *data,
                                        struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                        struct wl_array *indices)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
   uint32_t format;
   uint64_t modifier;
   uint16_t *index;

   /* Compositor may advertise or not a format table. If it does, we use it.
    * Otherwise, we steal the most recent advertised format table. If we don't have
    * a most recent advertised format table, compositor did something wrong. */
   if (feedback->format_table.data == NULL) {
      feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
      /* Re-init the stolen table's slot so it won't be torn down twice
       * when the current feedback is finalized. */
      dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
   }
   /* Either the mmap failed or no table was ever advertised: ignore. */
   if (feedback->format_table.data == MAP_FAILED ||
       feedback->format_table.data == NULL)
      return;

   wl_array_for_each(index, indices) {
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;

      wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
                        &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
                        format, modifier);
   }
}
1449 
/* Per-surface feedback: the staged pending_tranche is complete — commit it
 * to the tranche array and reset the staging area for the next one. */
static void
surface_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   /* Add tranche to array of tranches. */
   util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
                        feedback->pending_tranche);

   /* Ownership of the tranche's contents moved into the array; re-init the
    * staging tranche so the next events start from a clean slate. */
   dmabuf_feedback_tranche_init(&feedback->pending_tranche);
}
1463 
/* Order-insensitive comparison of two DRM modifier lists.  NOTE(review):
 * assumes each list is duplicate-free (as compositor-advertised modifier
 * sets are expected to be); with duplicates the membership test below
 * could report a false match — confirm if that ever matters. */
static bool
sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
                               uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
{
   /* Equal-size lists where every element of A appears in B are treated as
    * equal sets. */
   if (num_drm_modifiers_A != num_drm_modifiers_B)
      return false;

   for (uint32_t i = 0; i < num_drm_modifiers_A; i++) {
      bool found = false;
      for (uint32_t j = 0; !found && j < num_drm_modifiers_B; j++)
         found = (modifiers_A[i] == modifiers_B[j]);
      if (!found)
         return false;
   }

   return true;
}
1488 
1489 static void
surface_dmabuf_feedback_done(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback)1490 surface_dmabuf_feedback_done(void *data,
1491                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
1492 {
1493    struct wsi_wl_surface *wsi_wl_surface = data;
1494    struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
1495    struct wsi_wl_format *f;
1496 
1497    dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1498    wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
1499    dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);
1500 
1501    /* It's not just because we received dma-buf feedback that re-allocation is a
1502     * good idea. In order to know if we should re-allocate or not, we must
1503     * compare the most recent parameters that we used to allocate with the ones
1504     * from the feedback we just received.
1505     *
1506     * The allocation parameters are: the format, its set of modifiers and the
1507     * tranche flags. On WSI we are not using the tranche flags for anything, so
1508     * we disconsider this. As we can't switch to another format (it is selected
1509     * by the client), we just need to compare the set of modifiers.
1510     *
1511     * So we just look for the vk_format in the tranches (respecting their
1512     * preferences), and compare its set of modifiers with the set of modifiers
1513     * we've used to allocate previously. If they differ, we are using suboptimal
1514     * parameters and should re-allocate.
1515     */
1516    f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
1517    if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
1518                                             u_vector_tail(&f->modifiers),
1519                                             chain->num_drm_modifiers,
1520                                             chain->drm_modifiers))
1521       wsi_wl_surface->chain->suboptimal = true;
1522 }
1523 
/* Listener for per-surface dma-buf feedback events; each callback receives
 * the owning wsi_wl_surface as user data. */
static const struct zwp_linux_dmabuf_feedback_v1_listener
surface_dmabuf_feedback_listener = {
   .format_table = surface_dmabuf_feedback_format_table,
   .main_device = surface_dmabuf_feedback_main_device,
   .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
   .tranche_flags = surface_dmabuf_feedback_tranche_flags,
   .tranche_formats = surface_dmabuf_feedback_tranche_formats,
   .tranche_done = surface_dmabuf_feedback_tranche_done,
   .done = surface_dmabuf_feedback_done,
};
1534 
wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface * wsi_wl_surface)1535 static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
1536 {
1537    wsi_wl_surface->wl_dmabuf_feedback =
1538       zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
1539                                                wsi_wl_surface->surface);
1540 
1541    zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
1542                                              &surface_dmabuf_feedback_listener,
1543                                              wsi_wl_surface);
1544 
1545    if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
1546       goto fail;
1547    if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
1548       goto fail_pending;
1549 
1550    return VK_SUCCESS;
1551 
1552 fail_pending:
1553    dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1554 fail:
1555    zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
1556    wsi_wl_surface->wl_dmabuf_feedback = NULL;
1557    return VK_ERROR_OUT_OF_HOST_MEMORY;
1558 }
1559 
wsi_wl_surface_init(struct wsi_wl_surface * wsi_wl_surface,struct wsi_device * wsi_device)1560 static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
1561                                     struct wsi_device *wsi_device)
1562 {
1563    struct wsi_wayland *wsi =
1564       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
1565    VkResult result;
1566 
1567    /* wsi_wl_surface has already been initialized. */
1568    if (wsi_wl_surface->display)
1569       return VK_SUCCESS;
1570 
1571    result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
1572                                   wsi_device->sw, &wsi_wl_surface->display);
1573    if (result != VK_SUCCESS)
1574       goto fail;
1575 
1576    wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
1577    if (!wsi_wl_surface->surface) {
1578       result = VK_ERROR_OUT_OF_HOST_MEMORY;
1579       goto fail;
1580    }
1581    wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
1582                       wsi_wl_surface->display->queue);
1583 
1584    /* Bind wsi_wl_surface to dma-buf feedback. */
1585    if (wsi_wl_surface->display->wl_dmabuf &&
1586        zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
1587        ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
1588       result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
1589       if (result != VK_SUCCESS)
1590          goto fail;
1591 
1592       wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
1593                                  wsi_wl_surface->display->queue);
1594    }
1595 
1596    return VK_SUCCESS;
1597 
1598 fail:
1599    if (wsi_wl_surface->surface)
1600       wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
1601 
1602    if (wsi_wl_surface->display)
1603       wsi_wl_display_destroy(wsi_wl_surface->display);
1604    return result;
1605 }
1606 
1607 VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateWaylandSurfaceKHR(VkInstance _instance,const VkWaylandSurfaceCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSurfaceKHR * pSurface)1608 wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
1609                             const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
1610                             const VkAllocationCallbacks *pAllocator,
1611                             VkSurfaceKHR *pSurface)
1612 {
1613    VK_FROM_HANDLE(vk_instance, instance, _instance);
1614    struct wsi_wl_surface *wsi_wl_surface;
1615    VkIcdSurfaceWayland *surface;
1616 
1617    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
1618 
1619    wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
1620                                8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1621    if (wsi_wl_surface == NULL)
1622       return VK_ERROR_OUT_OF_HOST_MEMORY;
1623 
1624    surface = &wsi_wl_surface->base;
1625 
1626    surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
1627    surface->display = pCreateInfo->display;
1628    surface->surface = pCreateInfo->surface;
1629 
1630    *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
1631 
1632    return VK_SUCCESS;
1633 }
1634 
/* Bookkeeping for one outstanding wp_presentation feedback request, used to
 * implement VK_KHR_present_wait. Freed by the presented/discarded handlers
 * or by swapchain teardown, whichever comes first. */
struct wsi_wl_present_id {
   struct wp_presentation_feedback *feedback; /* outstanding protocol object */
   uint64_t present_id;                       /* id the application passed at present time */
   const VkAllocationCallbacks *alloc;        /* allocator this struct was allocated from */
   struct wsi_wl_swapchain *chain;            /* owning swapchain */
   struct wl_list link;                       /* entry in chain->present_ids.outstanding_list */
};
1642 
1643 static struct wsi_image *
wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain * wsi_chain,uint32_t image_index)1644 wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
1645                                uint32_t image_index)
1646 {
1647    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1648    return &chain->images[image_index].base;
1649 }
1650 
1651 static VkResult
wsi_wl_swapchain_release_images(struct wsi_swapchain * wsi_chain,uint32_t count,const uint32_t * indices)1652 wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
1653                                 uint32_t count, const uint32_t *indices)
1654 {
1655    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1656    for (uint32_t i = 0; i < count; i++) {
1657       uint32_t index = indices[i];
1658       assert(chain->images[index].busy);
1659       chain->images[index].busy = false;
1660    }
1661    return VK_SUCCESS;
1662 }
1663 
1664 static void
wsi_wl_swapchain_set_present_mode(struct wsi_swapchain * wsi_chain,VkPresentModeKHR mode)1665 wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
1666                                   VkPresentModeKHR mode)
1667 {
1668    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1669    chain->base.present_mode = mode;
1670 }
1671 
/* Implementation of vkWaitForPresentKHR: block until the present identified
 * by present_id has completed on the display, or until the relative timeout
 * (nanoseconds) expires.
 *
 * Concurrency model: several threads may wait at once, but at most one of
 * them "owns" event dispatch at a time (present_ids.dispatch_in_progress).
 * The others sleep on present_ids.list_advanced and are woken whenever the
 * dispatcher has processed new presentation events. */
static VkResult
wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
                                  uint64_t present_id,
                                  uint64_t timeout)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   struct wl_display *wl_display = chain->wsi_wl_surface->display->wl_display;
   struct timespec end_time;
   int wl_fd = wl_display_get_fd(wl_display);
   VkResult ret;
   int err;

   /* Convert the relative timeout to an absolute deadline; 0 (poll only)
    * and UINT64_MAX (wait forever) pass through unchanged. */
   uint64_t atimeout;
   if (timeout == 0 || timeout == UINT64_MAX)
      atimeout = timeout;
   else
      atimeout = os_time_get_absolute_timeout(timeout);

   timespec_from_nsec(&end_time, atimeout);

   /* Need to observe that the swapchain semaphore has been unsignalled,
    * as this is guaranteed when a present is complete. */
   VkResult result = wsi_swapchain_wait_for_present_semaphore(
         &chain->base, present_id, timeout);
   if (result != VK_SUCCESS)
      return result;

   if (!chain->present_ids.wp_presentation) {
      /* If we're enabling present wait despite the protocol not being supported,
       * use best effort not to crash, even if result will not be correct.
       * For correctness, we must at least wait for the timeline semaphore to complete. */
      return VK_SUCCESS;
   }

   /* PresentWait can be called concurrently.
    * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
    * The lock is only held while there is forward progress processing events from Wayland,
    * so there should be no problem locking without timeout.
    * We would like to be able to support timeout = 0 to query the current max_completed count.
    * A timedlock with no timeout can be problematic in that scenario. */
   err = pthread_mutex_lock(&chain->present_ids.lock);
   if (err != 0)
      return VK_ERROR_OUT_OF_DATE_KHR;

   /* Fast path: the present already completed before we got here. */
   if (chain->present_ids.max_completed >= present_id) {
      pthread_mutex_unlock(&chain->present_ids.lock);
      return VK_SUCCESS;
   }

   /* Someone else is dispatching events; wait for them to update the chain
    * status and wake us up. */
   while (chain->present_ids.dispatch_in_progress) {
      /* We only own the lock when the wait succeeds. */
      err = pthread_cond_timedwait(&chain->present_ids.list_advanced,
                                   &chain->present_ids.lock, &end_time);

      if (err == ETIMEDOUT) {
         pthread_mutex_unlock(&chain->present_ids.lock);
         return VK_TIMEOUT;
      } else if (err != 0) {
         pthread_mutex_unlock(&chain->present_ids.lock);
         return VK_ERROR_OUT_OF_DATE_KHR;
      }

      if (chain->present_ids.max_completed >= present_id) {
         pthread_mutex_unlock(&chain->present_ids.lock);
         return VK_SUCCESS;
      }

      /* Whoever was previously dispatching the events isn't anymore, so we
       * will take over and fall through below. */
      if (!chain->present_ids.dispatch_in_progress)
         break;
   }

   assert(!chain->present_ids.dispatch_in_progress);
   chain->present_ids.dispatch_in_progress = true;

   /* Whether or not we were dispatching the events before, we are now: pull
    * all the new events from our event queue, post them, and wake up everyone
    * else who might be waiting. */
   while (1) {
      ret = wl_display_dispatch_queue_pending(wl_display, chain->present_ids.queue);
      if (ret < 0) {
         ret = VK_ERROR_OUT_OF_DATE_KHR;
         goto relinquish_dispatch;
      }

      /* Some events dispatched: check the new completions. */
      if (ret > 0) {
         /* Completed our own present; stop our own dispatching and let
          * someone else pick it up. */
         if (chain->present_ids.max_completed >= present_id) {
            ret = VK_SUCCESS;
            goto relinquish_dispatch;
         }

         /* Wake up other waiters who may have been unblocked by the events
          * we just read. */
         pthread_cond_broadcast(&chain->present_ids.list_advanced);
      }

      /* Check for timeout, and relinquish the dispatch to another thread
       * if we're over our budget. */
      uint64_t current_time_nsec = os_time_get_nano();
      if (current_time_nsec > atimeout) {
         ret = VK_TIMEOUT;
         goto relinquish_dispatch;
      }

      /* To poll and read from WL fd safely, we must be cooperative.
       * See wl_display_prepare_read_queue in https://wayland.freedesktop.org/docs/html/apb.html */

      /* Try to read events from the server. */
      ret = wl_display_prepare_read_queue(wl_display, chain->present_ids.queue);
      if (ret < 0) {
         /* Another thread might have read events for our queue already. Go
          * back to dispatch them.
          */
         if (errno == EAGAIN)
            continue;
         ret = VK_ERROR_OUT_OF_DATE_KHR;
         goto relinquish_dispatch;
      }

      /* Drop the lock around poll, so people can wait whilst we sleep. */
      pthread_mutex_unlock(&chain->present_ids.lock);

      struct pollfd pollfd = {
         .fd = wl_fd,
         .events = POLLIN
      };
      struct timespec current_time, rel_timeout;
      timespec_from_nsec(&current_time, current_time_nsec);
      timespec_sub(&rel_timeout, &end_time, &current_time);
      ret = ppoll(&pollfd, 1, &rel_timeout, NULL);

      /* Re-lock after poll; either we're dispatching events under the lock or
       * bouncing out from an error also under the lock. We can't use timedlock
       * here because we need to acquire to clear dispatch_in_progress. */
      pthread_mutex_lock(&chain->present_ids.lock);

      if (ret <= 0) {
         int lerrno = errno;
         /* Pair the prepare_read above: nothing was (or could be) read. */
         wl_display_cancel_read(wl_display);
         if (ret < 0) {
            /* If ppoll() was interrupted, try again. */
            if (lerrno == EINTR || lerrno == EAGAIN)
               continue;
            ret = VK_ERROR_OUT_OF_DATE_KHR;
            goto relinquish_dispatch;
         }
         assert(ret == 0);
         continue;
      }

      ret = wl_display_read_events(wl_display);
      if (ret < 0) {
         ret = VK_ERROR_OUT_OF_DATE_KHR;
         goto relinquish_dispatch;
      }
   }

relinquish_dispatch:
   /* Hand dispatch ownership back and wake any waiters so one of them can
    * take over, regardless of whether we succeeded, timed out, or errored. */
   assert(chain->present_ids.dispatch_in_progress);
   chain->present_ids.dispatch_in_progress = false;
   pthread_cond_broadcast(&chain->present_ids.list_advanced);
   pthread_mutex_unlock(&chain->present_ids.lock);
   return ret;
}
1842 
/* Implementation of vkAcquireNextImageKHR: loop dispatching our event queue
 * until a wl_buffer release (or prior completion) frees an image, or until
 * info->timeout expires. Uses the cooperative prepare-read/poll/read-events
 * pattern so multiple threads can safely share the Wayland fd. */
static VkResult
wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                    const VkAcquireNextImageInfoKHR *info,
                                    uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
   struct timespec start_time, end_time;
   struct timespec rel_timeout;
   int wl_fd = wl_display_get_fd(wsi_wl_surface->display->wl_display);

   /* Turn the relative timeout into an absolute deadline on CLOCK_MONOTONIC. */
   timespec_from_nsec(&rel_timeout, info->timeout);

   clock_gettime(CLOCK_MONOTONIC, &start_time);
   timespec_add(&end_time, &rel_timeout, &start_time);

   while (1) {
      /* Try to dispatch potential events. */
      int ret = wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
                                                  wsi_wl_surface->display->queue);
      if (ret < 0)
         return VK_ERROR_OUT_OF_DATE_KHR;

      /* Try to find a free image. */
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
         }
      }

      /* Check for timeout. */
      struct timespec current_time;
      clock_gettime(CLOCK_MONOTONIC, &current_time);
      /* Per the spec, a zero timeout that can't be satisfied immediately is
       * VK_NOT_READY rather than VK_TIMEOUT. */
      if (timespec_after(&current_time, &end_time))
         return (info->timeout ? VK_TIMEOUT : VK_NOT_READY);

      /* Try to read events from the server. */
      ret = wl_display_prepare_read_queue(wsi_wl_surface->display->wl_display,
                                          wsi_wl_surface->display->queue);
      if (ret < 0) {
         /* Another thread might have read events for our queue already. Go
          * back to dispatch them.
          */
         if (errno == EAGAIN)
            continue;
         return VK_ERROR_OUT_OF_DATE_KHR;
      }

      struct pollfd pollfd = {
         .fd = wl_fd,
         .events = POLLIN
      };
      timespec_sub(&rel_timeout, &end_time, &current_time);
      ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
      if (ret <= 0) {
         int lerrno = errno;
         /* Pair the prepare_read above: nothing was (or could be) read. */
         wl_display_cancel_read(wsi_wl_surface->display->wl_display);
         if (ret < 0) {
            /* If ppoll() was interrupted, try again. */
            if (lerrno == EINTR || lerrno == EAGAIN)
               continue;
            return VK_ERROR_OUT_OF_DATE_KHR;
         }
         assert(ret == 0);
         continue;
      }

      ret = wl_display_read_events(wsi_wl_surface->display->wl_display);
      if (ret < 0)
         return VK_ERROR_OUT_OF_DATE_KHR;
   }
}
1918 
static void
presentation_handle_sync_output(void *data,
                                struct wp_presentation_feedback *feedback,
                                struct wl_output *output)
{
   /* Intentionally empty: we do not track which output the presentation
    * was synchronized to. */
}
1925 
1926 static void
presentation_handle_presented(void * data,struct wp_presentation_feedback * feedback,uint32_t tv_sec_hi,uint32_t tv_sec_lo,uint32_t tv_nsec,uint32_t refresh,uint32_t seq_hi,uint32_t seq_lo,uint32_t flags)1927 presentation_handle_presented(void *data,
1928                               struct wp_presentation_feedback *feedback,
1929                               uint32_t tv_sec_hi, uint32_t tv_sec_lo,
1930                               uint32_t tv_nsec, uint32_t refresh,
1931                               uint32_t seq_hi, uint32_t seq_lo,
1932                               uint32_t flags)
1933 {
1934    struct wsi_wl_present_id *id = data;
1935 
1936    /* present_ids.lock already held around dispatch */
1937    if (id->present_id > id->chain->present_ids.max_completed)
1938       id->chain->present_ids.max_completed = id->present_id;
1939 
1940    wp_presentation_feedback_destroy(feedback);
1941    wl_list_remove(&id->link);
1942    vk_free(id->alloc, id);
1943 }
1944 
1945 static void
presentation_handle_discarded(void * data,struct wp_presentation_feedback * feedback)1946 presentation_handle_discarded(void *data,
1947                               struct wp_presentation_feedback *feedback)
1948 {
1949    struct wsi_wl_present_id *id = data;
1950 
1951    /* present_ids.lock already held around dispatch */
1952    if (id->present_id > id->chain->present_ids.max_completed)
1953       id->chain->present_ids.max_completed = id->present_id;
1954 
1955    wp_presentation_feedback_destroy(feedback);
1956    wl_list_remove(&id->link);
1957    vk_free(id->alloc, id);
1958 }
1959 
1960 static const struct wp_presentation_feedback_listener
1961       pres_feedback_listener = {
1962    presentation_handle_sync_output,
1963    presentation_handle_presented,
1964    presentation_handle_discarded,
1965 };
1966 
1967 static void
frame_handle_done(void * data,struct wl_callback * callback,uint32_t serial)1968 frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
1969 {
1970    struct wsi_wl_swapchain *chain = data;
1971 
1972    chain->frame = NULL;
1973    chain->fifo_ready = true;
1974 
1975    wl_callback_destroy(callback);
1976 }
1977 
1978 static const struct wl_callback_listener frame_listener = {
1979    frame_handle_done,
1980 };
1981 
1982 static VkResult
wsi_wl_swapchain_queue_present(struct wsi_swapchain * wsi_chain,uint32_t image_index,uint64_t present_id,const VkPresentRegionKHR * damage)1983 wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
1984                                uint32_t image_index,
1985                                uint64_t present_id,
1986                                const VkPresentRegionKHR *damage)
1987 {
1988    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1989    struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
1990 
1991    if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
1992       struct wsi_wl_image *image = &chain->images[image_index];
1993       memcpy(image->shm_ptr, image->base.cpu_map,
1994              image->base.row_pitches[0] * chain->extent.height);
1995    }
1996 
1997    /* For EXT_swapchain_maintenance1. We might have transitioned from FIFO to MAILBOX.
1998     * In this case we need to let the FIFO request complete, before presenting MAILBOX. */
1999    while (!chain->fifo_ready) {
2000       int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
2001                                           wsi_wl_surface->display->queue);
2002       if (ret < 0)
2003          return VK_ERROR_OUT_OF_DATE_KHR;
2004    }
2005 
2006    assert(image_index < chain->base.image_count);
2007    wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
2008 
2009    if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
2010        damage->pRectangles && damage->rectangleCount > 0) {
2011       for (unsigned i = 0; i < damage->rectangleCount; i++) {
2012          const VkRectLayerKHR *rect = &damage->pRectangles[i];
2013          assert(rect->layer == 0);
2014          wl_surface_damage_buffer(wsi_wl_surface->surface,
2015                                   rect->offset.x, rect->offset.y,
2016                                   rect->extent.width, rect->extent.height);
2017       }
2018    } else {
2019       wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
2020    }
2021 
2022    if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
2023       chain->frame = wl_surface_frame(wsi_wl_surface->surface);
2024       wl_callback_add_listener(chain->frame, &frame_listener, chain);
2025       chain->fifo_ready = false;
2026    } else {
2027       /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
2028       chain->fifo_ready = true;
2029    }
2030 
2031    if (present_id > 0 && chain->present_ids.wp_presentation) {
2032       struct wsi_wl_present_id *id =
2033          vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
2034                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2035       id->chain = chain;
2036       id->present_id = present_id;
2037       id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;
2038 
2039       pthread_mutex_lock(&chain->present_ids.lock);
2040       id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
2041                                               chain->wsi_wl_surface->surface);
2042       wp_presentation_feedback_add_listener(id->feedback,
2043                                             &pres_feedback_listener,
2044                                             id);
2045       wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
2046       pthread_mutex_unlock(&chain->present_ids.lock);
2047    }
2048 
2049    chain->images[image_index].busy = true;
2050    wl_surface_commit(wsi_wl_surface->surface);
2051    wl_display_flush(wsi_wl_surface->display->wl_display);
2052 
2053    return VK_SUCCESS;
2054 }
2055 
2056 static void
buffer_handle_release(void * data,struct wl_buffer * buffer)2057 buffer_handle_release(void *data, struct wl_buffer *buffer)
2058 {
2059    struct wsi_wl_image *image = data;
2060 
2061    assert(image->buffer == buffer);
2062 
2063    image->busy = false;
2064 }
2065 
2066 static const struct wl_buffer_listener buffer_listener = {
2067    buffer_handle_release,
2068 };
2069 
2070 static uint8_t *
wsi_wl_alloc_image_shm(struct wsi_image * imagew,unsigned size)2071 wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
2072 {
2073    struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;
2074 
2075    /* Create a shareable buffer */
2076    int fd = os_create_anonymous_file(size, NULL);
2077    if (fd < 0)
2078       return NULL;
2079 
2080    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2081    if (ptr == MAP_FAILED) {
2082       close(fd);
2083       return NULL;
2084    }
2085 
2086    image->shm_fd = fd;
2087    image->shm_ptr = ptr;
2088    image->shm_size = size;
2089 
2090    return ptr;
2091 }
2092 
2093 static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain * chain,struct wsi_wl_image * image,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator)2094 wsi_wl_image_init(struct wsi_wl_swapchain *chain,
2095                   struct wsi_wl_image *image,
2096                   const VkSwapchainCreateInfoKHR *pCreateInfo,
2097                   const VkAllocationCallbacks* pAllocator)
2098 {
2099    struct wsi_wl_display *display = chain->wsi_wl_surface->display;
2100    VkResult result;
2101 
2102    result = wsi_create_image(&chain->base, &chain->base.image_info,
2103                              &image->base);
2104    if (result != VK_SUCCESS)
2105       return result;
2106 
2107    switch (chain->buffer_type) {
2108    case WSI_WL_BUFFER_GPU_SHM:
2109    case WSI_WL_BUFFER_SHM_MEMCPY: {
2110       if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
2111          wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
2112                                               chain->extent.height);
2113       }
2114       assert(image->shm_ptr != NULL);
2115 
2116       /* Share it in a wl_buffer */
2117       struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
2118                                                     image->shm_fd,
2119                                                     image->shm_size);
2120       wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
2121       image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
2122                                                 chain->extent.height,
2123                                                 image->base.row_pitches[0],
2124                                                 chain->shm_format);
2125       wl_shm_pool_destroy(pool);
2126       break;
2127    }
2128 
2129    case WSI_WL_BUFFER_NATIVE: {
2130       assert(display->wl_dmabuf);
2131 
2132       struct zwp_linux_buffer_params_v1 *params =
2133          zwp_linux_dmabuf_v1_create_params(display->wl_dmabuf);
2134       if (!params)
2135          goto fail_image;
2136 
2137       for (int i = 0; i < image->base.num_planes; i++) {
2138          zwp_linux_buffer_params_v1_add(params,
2139                                         image->base.dma_buf_fd,
2140                                         i,
2141                                         image->base.offsets[i],
2142                                         image->base.row_pitches[i],
2143                                         image->base.drm_modifier >> 32,
2144                                         image->base.drm_modifier & 0xffffffff);
2145       }
2146 
2147       image->buffer =
2148          zwp_linux_buffer_params_v1_create_immed(params,
2149                                                  chain->extent.width,
2150                                                  chain->extent.height,
2151                                                  chain->drm_format,
2152                                                  0);
2153       zwp_linux_buffer_params_v1_destroy(params);
2154       break;
2155    }
2156 
2157    default:
2158       unreachable("Invalid buffer type");
2159    }
2160 
2161    if (!image->buffer)
2162       goto fail_image;
2163 
2164    wl_buffer_add_listener(image->buffer, &buffer_listener, image);
2165 
2166    return VK_SUCCESS;
2167 
2168 fail_image:
2169    wsi_destroy_image(&chain->base, &image->base);
2170 
2171    return VK_ERROR_OUT_OF_HOST_MEMORY;
2172 }
2173 
2174 static void
wsi_wl_swapchain_images_free(struct wsi_wl_swapchain * chain)2175 wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
2176 {
2177    for (uint32_t i = 0; i < chain->base.image_count; i++) {
2178       if (chain->images[i].buffer) {
2179          wl_buffer_destroy(chain->images[i].buffer);
2180          wsi_destroy_image(&chain->base, &chain->images[i].base);
2181          if (chain->images[i].shm_size) {
2182             close(chain->images[i].shm_fd);
2183             munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
2184          }
2185       }
2186    }
2187 }
2188 
/* Free chain-wide (non per-image) state: the pending frame callback, the
 * tearing-control object, the present-wait machinery, and the common
 * swapchain. Does NOT free the images (see wsi_wl_swapchain_images_free)
 * or the chain allocation itself. */
static void
wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
                            const VkAllocationCallbacks *pAllocator)
{
   if (chain->frame)
      wl_callback_destroy(chain->frame);
   if (chain->tearing_control)
      wp_tearing_control_v1_destroy(chain->tearing_control);
   /* Unlink the surface from this chain so later feedback events don't
    * touch freed memory. */
   if (chain->wsi_wl_surface)
      chain->wsi_wl_surface->chain = NULL;

   if (chain->present_ids.wp_presentation) {
      /* No dispatcher may be running while we tear the machinery down. */
      assert(!chain->present_ids.dispatch_in_progress);

      /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to be complete.
       * Waiting for the swapchain fence is enough.
       * Just clean up anything user did not wait for. */
      struct wsi_wl_present_id *id, *tmp;
      wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
         wp_presentation_feedback_destroy(id->feedback);
         wl_list_remove(&id->link);
         vk_free(id->alloc, id);
      }

      wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
      pthread_cond_destroy(&chain->present_ids.list_advanced);
      pthread_mutex_destroy(&chain->present_ids.lock);
   }

   wsi_swapchain_finish(&chain->base);
}
2220 
2221 static VkResult
wsi_wl_swapchain_destroy(struct wsi_swapchain * wsi_chain,const VkAllocationCallbacks * pAllocator)2222 wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
2223                          const VkAllocationCallbacks *pAllocator)
2224 {
2225    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
2226 
2227    wsi_wl_swapchain_images_free(chain);
2228    wsi_wl_swapchain_chain_free(chain, pAllocator);
2229 
2230    vk_free(pAllocator, chain);
2231 
2232    return VK_SUCCESS;
2233 }
2234 
2235 static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase * icd_surface,VkDevice device,struct wsi_device * wsi_device,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,struct wsi_swapchain ** swapchain_out)2236 wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
2237                                 VkDevice device,
2238                                 struct wsi_device *wsi_device,
2239                                 const VkSwapchainCreateInfoKHR* pCreateInfo,
2240                                 const VkAllocationCallbacks* pAllocator,
2241                                 struct wsi_swapchain **swapchain_out)
2242 {
2243    struct wsi_wl_surface *wsi_wl_surface =
2244       wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
2245    struct wsi_wl_swapchain *chain;
2246    VkResult result;
2247 
2248    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
2249 
2250    int num_images = pCreateInfo->minImageCount;
2251 
2252    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2253    chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2254    if (chain == NULL)
2255       return VK_ERROR_OUT_OF_HOST_MEMORY;
2256 
2257    /* We are taking ownership of the wsi_wl_surface, so remove ownership from
2258     * oldSwapchain. If the surface is currently owned by a swapchain that is
2259     * not oldSwapchain we return an error.
2260     */
2261    if (wsi_wl_surface->chain &&
2262        wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
2263       return VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
2264    }
2265    if (pCreateInfo->oldSwapchain) {
2266       VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
2267       old_chain->wsi_wl_surface = NULL;
2268       if (old_chain->tearing_control) {
2269          wp_tearing_control_v1_destroy(old_chain->tearing_control);
2270          old_chain->tearing_control = NULL;
2271       }
2272    }
2273 
2274    /* Take ownership of the wsi_wl_surface */
2275    chain->wsi_wl_surface = wsi_wl_surface;
2276    wsi_wl_surface->chain = chain;
2277 
2278    result = wsi_wl_surface_init(wsi_wl_surface, wsi_device);
2279    if (result != VK_SUCCESS)
2280       goto fail;
2281 
2282    VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
2283    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
2284       chain->tearing_control =
2285          wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
2286                                                            wsi_wl_surface->surface);
2287       if (!chain->tearing_control) {
2288          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2289          goto fail;
2290       }
2291       wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
2292                                                           WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
2293    }
2294 
2295    enum wsi_wl_buffer_type buffer_type;
2296    struct wsi_base_image_params *image_params = NULL;
2297    struct wsi_cpu_image_params cpu_image_params;
2298    struct wsi_drm_image_params drm_image_params;
2299    uint32_t num_drm_modifiers = 0;
2300    const uint64_t *drm_modifiers = NULL;
2301    if (wsi_device->sw) {
2302       cpu_image_params = (struct wsi_cpu_image_params) {
2303          .base.image_type = WSI_IMAGE_TYPE_CPU,
2304       };
2305       if (wsi_device->has_import_memory_host &&
2306           !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
2307          buffer_type = WSI_WL_BUFFER_GPU_SHM;
2308          cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
2309       } else {
2310          buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
2311       }
2312       image_params = &cpu_image_params.base;
2313    } else {
2314       drm_image_params = (struct wsi_drm_image_params) {
2315          .base.image_type = WSI_IMAGE_TYPE_DRM,
2316          .same_gpu = wsi_wl_surface->display->same_gpu,
2317       };
2318       /* Use explicit DRM format modifiers when both the server and the driver
2319        * support them.
2320        */
2321       if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
2322          struct wsi_wl_format *f = NULL;
2323          /* Try to select modifiers for our vk_format from surface dma-buf
2324           * feedback. If that doesn't work, fallback to the list of supported
2325           * formats/modifiers by the display. */
2326          if (wsi_wl_surface->wl_dmabuf_feedback)
2327             f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
2328                                                          pCreateInfo->imageFormat);
2329          if (f == NULL)
2330             f = find_format(&chain->wsi_wl_surface->display->formats,
2331                             pCreateInfo->imageFormat);
2332          if (f != NULL) {
2333             num_drm_modifiers = u_vector_length(&f->modifiers);
2334             drm_modifiers = u_vector_tail(&f->modifiers);
2335             if (num_drm_modifiers > 0)
2336                drm_image_params.num_modifier_lists = 1;
2337             else
2338                drm_image_params.num_modifier_lists = 0;
2339             drm_image_params.num_modifiers = &num_drm_modifiers;
2340             drm_image_params.modifiers = &drm_modifiers;
2341          }
2342       }
2343       buffer_type = WSI_WL_BUFFER_NATIVE;
2344       image_params = &drm_image_params.base;
2345    }
2346 
2347    result = wsi_swapchain_init(wsi_device, &chain->base, device,
2348                                pCreateInfo, image_params, pAllocator);
2349    if (result != VK_SUCCESS)
2350       goto fail;
2351 
2352    bool alpha = pCreateInfo->compositeAlpha ==
2353                       VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
2354 
2355    chain->base.destroy = wsi_wl_swapchain_destroy;
2356    chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
2357    chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
2358    chain->base.queue_present = wsi_wl_swapchain_queue_present;
2359    chain->base.release_images = wsi_wl_swapchain_release_images;
2360    chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
2361    chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
2362    chain->base.present_mode = present_mode;
2363    chain->base.image_count = num_images;
2364    chain->extent = pCreateInfo->imageExtent;
2365    chain->vk_format = pCreateInfo->imageFormat;
2366    chain->buffer_type = buffer_type;
2367    if (buffer_type == WSI_WL_BUFFER_NATIVE) {
2368       chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
2369    } else {
2370       chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
2371    }
2372    chain->num_drm_modifiers = num_drm_modifiers;
2373    chain->drm_modifiers = drm_modifiers;
2374 
2375    if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
2376       if (!wsi_init_pthread_cond_monotonic(&chain->present_ids.list_advanced))
2377          goto fail;
2378       pthread_mutex_init(&chain->present_ids.lock, NULL);
2379 
2380       wl_list_init(&chain->present_ids.outstanding_list);
2381       chain->present_ids.queue =
2382             wl_display_create_queue(chain->wsi_wl_surface->display->wl_display);
2383       chain->present_ids.wp_presentation =
2384             wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
2385       wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
2386                          chain->present_ids.queue);
2387    }
2388 
2389    chain->fifo_ready = true;
2390 
2391    for (uint32_t i = 0; i < chain->base.image_count; i++) {
2392       result = wsi_wl_image_init(chain, &chain->images[i],
2393                                  pCreateInfo, pAllocator);
2394       if (result != VK_SUCCESS)
2395          goto fail_image_init;
2396       chain->images[i].busy = false;
2397    }
2398 
2399    *swapchain_out = &chain->base;
2400 
2401    return VK_SUCCESS;
2402 
2403 fail_image_init:
2404    wsi_wl_swapchain_images_free(chain);
2405 
2406    wsi_wl_swapchain_chain_free(chain, pAllocator);
2407 fail:
2408    vk_free(pAllocator, chain);
2409    wsi_wl_surface->chain = NULL;
2410 
2411    return result;
2412 }
2413 
2414 VkResult
wsi_wl_init_wsi(struct wsi_device * wsi_device,const VkAllocationCallbacks * alloc,VkPhysicalDevice physical_device)2415 wsi_wl_init_wsi(struct wsi_device *wsi_device,
2416                 const VkAllocationCallbacks *alloc,
2417                 VkPhysicalDevice physical_device)
2418 {
2419    struct wsi_wayland *wsi;
2420    VkResult result;
2421 
2422    wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2423                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2424    if (!wsi) {
2425       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2426       goto fail;
2427    }
2428 
2429    wsi->physical_device = physical_device;
2430    wsi->alloc = alloc;
2431    wsi->wsi = wsi_device;
2432 
2433    wsi->base.get_support = wsi_wl_surface_get_support;
2434    wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
2435    wsi->base.get_formats = wsi_wl_surface_get_formats;
2436    wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
2437    wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
2438    wsi->base.get_present_rectangles = wsi_wl_surface_get_present_rectangles;
2439    wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;
2440 
2441    wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;
2442 
2443    return VK_SUCCESS;
2444 
2445 fail:
2446    wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;
2447 
2448    return result;
2449 }
2450 
2451 void
wsi_wl_finish_wsi(struct wsi_device * wsi_device,const VkAllocationCallbacks * alloc)2452 wsi_wl_finish_wsi(struct wsi_device *wsi_device,
2453                   const VkAllocationCallbacks *alloc)
2454 {
2455    struct wsi_wayland *wsi =
2456       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
2457    if (!wsi)
2458       return;
2459 
2460    vk_free(alloc, wsi);
2461 }
2462