/*
 * Copyright 2021 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/** VK_EXT_headless_surface */

#include "util/macros.h"
#include "util/hash_table.h"
#include "util/timespec.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "vk_instance.h"
#include "vk_physical_device.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_queue.h"

#include "drm-uapi/drm_fourcc.h"

struct wsi_headless_format {
   VkFormat format;
   struct u_vector modifiers;
};

struct wsi_headless {
   struct wsi_interface base;

   struct wsi_device *wsi;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;
};

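/* The headless backend has nothing to present to, so every queue family can
 * "present" to a headless surface.
 */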
static VkResult
wsi_headless_surface_get_support(VkIcdSurfaceBase *surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t queueFamilyIndex,
                                 VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

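/* Surface capabilities are mostly unconstrained: the current extent is
 * "undefined" (written as -1/-1 below, i.e. UINT32_MAX after conversion) and
 * maxImageCount is 0, so the swapchain size is dictated by the application.
 */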
static VkResult
wsi_headless_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                      struct wsi_device *wsi_device,
                                      VkSurfaceCapabilitiesKHR* caps)
{
   /* For true mailbox mode, we need at least 4 images:
    * 1) One to scan out from
    * 2) One to have queued for scan-out
    * 3) One to be currently held by the compositor (headless has no real
    *    compositor, but we keep the same budget as the windowed backends)
    * 4) One to render to
    */
   caps->minImageCount = 4;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->currentExtent = (VkExtent2D) { -1, -1 };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   caps->maxImageExtent = (VkExtent2D) {
      wsi_device->maxImageDimension2D,
      wsi_device->maxImageDimension2D,
   };

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
   if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
      caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;

   return VK_SUCCESS;
}

static VkResult
wsi_headless_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                       struct wsi_device *wsi_device,
                                       const void *info_next,
                                       VkSurfaceCapabilities2KHR* caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      wsi_headless_surface_get_capabilities(surface, wsi_device,
                                            &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

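/* Only VK_FORMAT_B8G8R8A8_UNORM and VK_FORMAT_R8G8B8A8_UNORM are advertised,
 * both in sRGB nonlinear color space; force_bgra8_unorm_first only changes
 * which one is listed first.
 */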
static VkResult
wsi_headless_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t* pSurfaceFormatCount,
                                 VkSurfaceFormatKHR* pSurfaceFormats)
{
   struct wsi_headless *wsi =
      (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out, pSurfaceFormats, pSurfaceFormatCount);

   if (wsi->wsi->force_bgra8_unorm_first) {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   } else {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
wsi_headless_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                                  struct wsi_device *wsi_device,
                                  const void *info_next,
                                  uint32_t* pSurfaceFormatCount,
                                  VkSurfaceFormat2KHR* pSurfaceFormats)
{
   struct wsi_headless *wsi =
      (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out, pSurfaceFormats, pSurfaceFormatCount);

   if (wsi->wsi->force_bgra8_unorm_first) {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   } else {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

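/* Standard two-call enumeration: with pPresentModes == NULL only the count is
 * returned, otherwise up to *pPresentModeCount entries are copied and
 * VK_INCOMPLETE signals a truncated list.
 */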
static VkResult
wsi_headless_surface_get_present_modes(VkIcdSurfaceBase *surface,
                                       struct wsi_device *wsi_device,
                                       uint32_t* pPresentModeCount,
                                       VkPresentModeKHR* pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < ARRAY_SIZE(present_modes))
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

static VkResult
wsi_headless_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
                                            struct wsi_device *wsi_device,
                                            uint32_t* pRectCount,
                                            VkRect2D* pRects)
{
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      /* We don't know a size so just return the usual "I don't know." */
      *rect = (VkRect2D) {
         .offset = { 0, 0 },
         .extent = { UINT32_MAX, UINT32_MAX },
      };
   }

   return vk_outarray_status(&out);
}

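/* Per-image and per-swapchain state. A headless image only needs a "busy"
 * flag: it is set on acquire and cleared again on present, since nothing ever
 * actually consumes the image.
 */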
struct wsi_headless_image {
   struct wsi_image base;
   bool busy;
};

struct wsi_headless_swapchain {
   struct wsi_swapchain base;

   VkExtent2D extent;
   VkFormat vk_format;

   struct u_vector modifiers;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   struct wsi_headless_image images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_headless_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

static struct wsi_image *
wsi_headless_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
                                     uint32_t image_index)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

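/* Acquire busy-waits for a non-busy image until the caller's timeout expires.
 * The loop spins without sleeping, which is tolerable here because
 * queue_present immediately marks images non-busy again.
 */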
static VkResult
wsi_headless_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                          const VkAcquireNextImageInfoKHR *info,
                                          uint32_t *image_index)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;
   struct timespec start_time, end_time;
   struct timespec rel_timeout;

   timespec_from_nsec(&rel_timeout, info->timeout);

   clock_gettime(CLOCK_MONOTONIC, &start_time);
   timespec_add(&end_time, &rel_timeout, &start_time);

   while (1) {
      /* Try to find a free image. */
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      /* Check for timeout. */
      struct timespec current_time;
      clock_gettime(CLOCK_MONOTONIC, &current_time);
      if (timespec_after(&current_time, &end_time))
         return VK_NOT_READY;
   }
}

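/* "Presenting" an image just marks it free again; there is no display to hand
 * it to, so present_id and damage are ignored.
 */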
static VkResult
wsi_headless_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
                                     uint32_t image_index,
                                     uint64_t present_id,
                                     const VkPresentRegionKHR *damage)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;

   assert(image_index < chain->base.image_count);

   chain->images[image_index].busy = false;

   return VK_SUCCESS;
}

static VkResult
wsi_headless_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                               const VkAllocationCallbacks *pAllocator)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      if (chain->images[i].base.image != VK_NULL_HANDLE)
         wsi_destroy_image(&chain->base, &chain->images[i].base);
   }

   u_vector_finish(&chain->modifiers);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static const struct VkDrmFormatModifierPropertiesEXT *
get_modifier_props(const struct wsi_image_info *info, uint64_t modifier)
{
   for (uint32_t i = 0; i < info->modifier_prop_count; i++) {
      if (info->modifier_props[i].drmFormatModifier == modifier)
         return &info->modifier_props[i];
   }
   return NULL;
}

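/* Back a swapchain image with plain device memory. Unlike the windowed
 * backends there is no dma-buf export: memory is a dedicated allocation and
 * the per-plane layout is queried only so the wsi_image bookkeeping (sizes,
 * pitches, offsets) stays populated.
 */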
static VkResult
wsi_create_null_image_mem(const struct wsi_swapchain *chain,
                          const struct wsi_image_info *info,
                          struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = NULL,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_device_memory_type(wsi, reqs.memoryTypeBits),
   };
   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   image->dma_buf_fd = -1;

   if (info->drm_mod_list.drmFormatModifierCount > 0) {
      VkImageDrmFormatModifierPropertiesEXT image_mod_props = {
         .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
      };
      result = wsi->GetImageDrmFormatModifierPropertiesEXT(chain->device,
                                                           image->image,
                                                           &image_mod_props);
      if (result != VK_SUCCESS)
         return result;

      image->drm_modifier = image_mod_props.drmFormatModifier;
      assert(image->drm_modifier != DRM_FORMAT_MOD_INVALID);

      const struct VkDrmFormatModifierPropertiesEXT *mod_props =
         get_modifier_props(info, image->drm_modifier);
      image->num_planes = mod_props->drmFormatModifierPlaneCount;

      for (uint32_t p = 0; p < image->num_planes; p++) {
         const VkImageSubresource image_subresource = {
            .aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT << p,
            .mipLevel = 0,
            .arrayLayer = 0,
         };
         VkSubresourceLayout image_layout;
         wsi->GetImageSubresourceLayout(chain->device, image->image,
                                        &image_subresource, &image_layout);
         image->sizes[p] = image_layout.size;
         image->row_pitches[p] = image_layout.rowPitch;
         image->offsets[p] = image_layout.offset;
      }
   } else {
      const VkImageSubresource image_subresource = {
         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
         .mipLevel = 0,
         .arrayLayer = 0,
      };
      VkSubresourceLayout image_layout;
      wsi->GetImageSubresourceLayout(chain->device, image->image,
                                     &image_subresource, &image_layout);

      image->drm_modifier = DRM_FORMAT_MOD_INVALID;
      image->num_planes = 1;
      image->sizes[0] = reqs.size;
      image->row_pitches[0] = image_layout.rowPitch;
      image->offsets[0] = 0;
   }

   return VK_SUCCESS;
}

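/* Build the swapchain: the chain and its image array are allocated in one
 * block, the generic wsi_swapchain_init()/wsi_configure_image() path sets up
 * the DRM image parameters, and create_mem is overridden so images are backed
 * by wsi_create_null_image_mem() instead of being shared with a display
 * server.
 */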
static VkResult
wsi_headless_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                      VkDevice device,
                                      struct wsi_device *wsi_device,
                                      const VkSwapchainCreateInfoKHR* pCreateInfo,
                                      const VkAllocationCallbacks* pAllocator,
                                      struct wsi_swapchain **swapchain_out)
{
   struct wsi_headless_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   struct wsi_drm_image_params drm_params = {
      .base.image_type = WSI_IMAGE_TYPE_DRM,
      .same_gpu = true,
   };

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, &drm_params.base, pAllocator);
   if (result != VK_SUCCESS) {
      vk_free(pAllocator, chain);
      return result;
   }

   chain->base.destroy = wsi_headless_swapchain_destroy;
   chain->base.get_wsi_image = wsi_headless_swapchain_get_wsi_image;
   chain->base.acquire_next_image = wsi_headless_swapchain_acquire_next_image;
   chain->base.queue_present = wsi_headless_swapchain_queue_present;
   chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
   chain->base.image_count = num_images;
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;

   result = wsi_configure_image(&chain->base, pCreateInfo,
                                0, &chain->base.image_info);
   if (result != VK_SUCCESS) {
      goto fail;
   }
   chain->base.image_info.create_mem = wsi_create_null_image_mem;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      result = wsi_create_image(&chain->base, &chain->base.image_info,
                                &chain->images[i].base);
      if (result != VK_SUCCESS)
         goto fail;

      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   wsi_headless_swapchain_destroy(&chain->base, pAllocator);

   return result;
}

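/* Register the headless platform with the WSI device: allocate the
 * wsi_headless interface, fill in the surface callbacks, and hook it into
 * wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS].
 */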
VkResult
wsi_headless_init_wsi(struct wsi_device *wsi_device,
                      const VkAllocationCallbacks *alloc,
                      VkPhysicalDevice physical_device)
{
   struct wsi_headless *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->wsi = wsi_device;

   wsi->base.get_support = wsi_headless_surface_get_support;
   wsi->base.get_capabilities2 = wsi_headless_surface_get_capabilities2;
   wsi->base.get_formats = wsi_headless_surface_get_formats;
   wsi->base.get_formats2 = wsi_headless_surface_get_formats2;
   wsi->base.get_present_modes = wsi_headless_surface_get_present_modes;
   wsi->base.get_present_rectangles = wsi_headless_surface_get_present_rectangles;
   wsi->base.create_swapchain = wsi_headless_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] = &wsi->base;

   return VK_SUCCESS;

fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] = NULL;

   return result;
}

void
wsi_headless_finish_wsi(struct wsi_device *wsi_device,
                        const VkAllocationCallbacks *alloc)
{
   struct wsi_headless *wsi =
      (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];
   if (!wsi)
      return;

   vk_free(alloc, wsi);
}

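/* vkCreateHeadlessSurfaceEXT entrypoint: a headless surface carries no state
 * beyond its platform tag, so creation is just an allocation.
 */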
VkResult wsi_CreateHeadlessSurfaceEXT(
   VkInstance                                  _instance,
   const VkHeadlessSurfaceCreateInfoEXT*       pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkSurfaceKHR*                               pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceHeadless *surface;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_HEADLESS;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}