/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_fence.h"

#include "util/os_time.h"
#include "util/perf/cpu_trace.h"

#ifndef _WIN32
#include <unistd.h>
#endif

#include "vk_common_entrypoints.h"
#include "vk_device.h"
#include "vk_log.h"
#include "vk_physical_device.h"
#include "vk_util.h"

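/* Translate a vk_sync_type's import capabilities into the corresponding
 * VkExternalFenceHandleTypeFlags.
 */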
static VkExternalFenceHandleTypeFlags
vk_sync_fence_import_types(const struct vk_sync_type *type)
{
   VkExternalFenceHandleTypeFlags handle_types = 0;

   if (type->import_opaque_fd)
      handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;

   if (type->import_sync_file)
      handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

   return handle_types;
}

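/* Translate a vk_sync_type's export capabilities into the corresponding
 * VkExternalFenceHandleTypeFlags.
 */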
static VkExternalFenceHandleTypeFlags
vk_sync_fence_export_types(const struct vk_sync_type *type)
{
   VkExternalFenceHandleTypeFlags handle_types = 0;

   if (type->export_opaque_fd)
      handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;

   if (type->export_sync_file)
      handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

   return handle_types;
}

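/* Handle types a vk_sync_type can both import and export; these are the
 * external handle types a fence backed by this sync type can support.
 */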
static VkExternalFenceHandleTypeFlags
vk_sync_fence_handle_types(const struct vk_sync_type *type)
{
   return vk_sync_fence_export_types(type) &
          vk_sync_fence_import_types(type);
}

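/* Pick the first supported sync type that provides binary semantics, CPU
 * wait, and CPU reset, and that can import and export every handle type in
 * handle_types. Returns NULL if no such type exists.
 */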
static const struct vk_sync_type *
get_fence_sync_type(struct vk_physical_device *pdevice,
                    VkExternalFenceHandleTypeFlags handle_types)
{
   static const enum vk_sync_features req_features =
      VK_SYNC_FEATURE_BINARY |
      VK_SYNC_FEATURE_CPU_WAIT |
      VK_SYNC_FEATURE_CPU_RESET;

   for (const struct vk_sync_type *const *t =
        pdevice->supported_sync_types; *t; t++) {
      if (req_features & ~(*t)->features)
         continue;

      if (handle_types & ~vk_sync_fence_handle_types(*t))
         continue;

      return *t;
   }

   return NULL;
}

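/* Create a vk_fence, choosing a vk_sync type that is compatible with any
 * external handle types requested through VkExportFenceCreateInfo.
 */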
VkResult
vk_fence_create(struct vk_device *device,
                const VkFenceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                struct vk_fence **fence_out)
{
   struct vk_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   const VkExportFenceCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
   VkExternalFenceHandleTypeFlags handle_types =
      export ? export->handleTypes : 0;

   const struct vk_sync_type *sync_type =
      get_fence_sync_type(device->physical, handle_types);
   if (sync_type == NULL) {
      /* We should always be able to get a fence type for internal */
      assert(get_fence_sync_type(device->physical, 0) != NULL);
      return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                       "Combination of external handle types is unsupported "
                       "for VkFence creation.");
   }

   /* Allocate a vk_fence + vk_sync implementation. Because the permanent
    * field of vk_fence is the base field of the vk_sync implementation, we
    * can make the 2 structures overlap.
    */
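   /* Illustrative layout only (not real declarations): if a driver's sync
    * implementation were, say, struct drv_sync { struct vk_sync base; ... },
    * the single allocation would look like
    *
    *    [ vk_fence fields ... | permanent == drv_sync.base | drv_sync tail ]
    *                          ^ offsetof(struct vk_fence, permanent)
    *
    * so &fence->permanent can be handed to the driver wherever it expects
    * its own sync object.
    */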
   size_t size = offsetof(struct vk_fence, permanent) + sync_type->size;
   fence = vk_object_zalloc(device, pAllocator, size, VK_OBJECT_TYPE_FENCE);
   if (fence == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   enum vk_sync_flags sync_flags = 0;
   if (handle_types)
      sync_flags |= VK_SYNC_IS_SHAREABLE;

   bool signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;
   VkResult result = vk_sync_init(device, &fence->permanent,
                                  sync_type, sync_flags, signaled);
   if (result != VK_SUCCESS) {
      vk_object_free(device, pAllocator, fence);
      return result;
   }

   *fence_out = fence;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_CreateFence(VkDevice _device,
                      const VkFenceCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkFence *pFence)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   struct vk_fence *fence = NULL;

   VkResult result = vk_fence_create(device, pCreateInfo, pAllocator, &fence);
   if (result != VK_SUCCESS)
      return result;

   *pFence = vk_fence_to_handle(fence);

   return VK_SUCCESS;
}

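/* Destroy the fence's temporary payload, if any, so that its permanent
 * payload becomes the active payload again.
 */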
void
vk_fence_reset_temporary(struct vk_device *device,
                         struct vk_fence *fence)
{
   if (fence->temporary == NULL)
      return;

   vk_sync_destroy(device, fence->temporary);
   fence->temporary = NULL;
}

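/* Destroy both the temporary and permanent payloads and free the fence
 * object itself.
 */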
void
vk_fence_destroy(struct vk_device *device,
                 struct vk_fence *fence,
                 const VkAllocationCallbacks *pAllocator)
{
   vk_fence_reset_temporary(device, fence);
   vk_sync_finish(device, &fence->permanent);

   vk_object_free(device, pAllocator, fence);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_DestroyFence(VkDevice _device,
                       VkFence _fence,
                       const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(vk_fence, fence, _fence);

   if (fence == NULL)
      return;

   vk_fence_destroy(device, fence, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_ResetFences(VkDevice _device,
                      uint32_t fenceCount,
                      const VkFence *pFences)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      VK_FROM_HANDLE(vk_fence, fence, pFences[i]);

      /* From the Vulkan 1.2.194 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      vk_fence_reset_temporary(device, fence);

      VkResult result = vk_sync_reset(device, &fence->permanent);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_GetFenceStatus(VkDevice _device,
                         VkFence _fence)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(vk_fence, fence, _fence);

   if (vk_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   VkResult result = vk_sync_wait(device, vk_fence_get_active_sync(fence),
                                  0 /* wait_value */,
                                  VK_SYNC_WAIT_COMPLETE,
                                  0 /* abs_timeout_ns */);
   if (result == VK_TIMEOUT)
      return VK_NOT_READY;
   else
      return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_WaitForFences(VkDevice _device,
                        uint32_t fenceCount,
                        const VkFence *pFences,
                        VkBool32 waitAll,
                        uint64_t timeout)
{
   MESA_TRACE_FUNC();

   VK_FROM_HANDLE(vk_device, device, _device);

   if (vk_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   if (fenceCount == 0)
      return VK_SUCCESS;

   uint64_t abs_timeout_ns = os_time_get_absolute_timeout(timeout);

   STACK_ARRAY(struct vk_sync_wait, waits, fenceCount);

   for (uint32_t i = 0; i < fenceCount; i++) {
      VK_FROM_HANDLE(vk_fence, fence, pFences[i]);
      waits[i] = (struct vk_sync_wait) {
         .sync = vk_fence_get_active_sync(fence),
         .stage_mask = ~(VkPipelineStageFlags2)0,
      };
   }

   enum vk_sync_wait_flags wait_flags = VK_SYNC_WAIT_COMPLETE;
   if (!waitAll)
      wait_flags |= VK_SYNC_WAIT_ANY;

   VkResult result = vk_sync_wait_many(device, fenceCount, waits,
                                       wait_flags, abs_timeout_ns);

   STACK_ARRAY_FINISH(waits);

   VkResult device_status = vk_device_check_status(device);
   if (device_status != VK_SUCCESS)
      return device_status;

   return result;
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
   VkExternalFenceProperties *pExternalFenceProperties)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);

   assert(pExternalFenceInfo->sType ==
          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO);
   const VkExternalFenceHandleTypeFlagBits handle_type =
      pExternalFenceInfo->handleType;

   const struct vk_sync_type *sync_type =
      get_fence_sync_type(pdevice, handle_type);
   if (sync_type == NULL) {
      pExternalFenceProperties->exportFromImportedHandleTypes = 0;
      pExternalFenceProperties->compatibleHandleTypes = 0;
      pExternalFenceProperties->externalFenceFeatures = 0;
      return;
   }

   VkExternalFenceHandleTypeFlagBits import =
      vk_sync_fence_import_types(sync_type);
   VkExternalFenceHandleTypeFlagBits export =
      vk_sync_fence_export_types(sync_type);

   if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      const struct vk_sync_type *opaque_sync_type =
         get_fence_sync_type(pdevice, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT);

      /* If we're a different vk_sync_type than the one selected when only
       * OPAQUE_FD is set, then we can't import/export OPAQUE_FD. Put
       * differently, there can only be one OPAQUE_FD sync type.
       */
      if (sync_type != opaque_sync_type) {
         import &= ~VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;
         export &= ~VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;
      }
   }

   VkExternalFenceHandleTypeFlags compatible = import & export;
   VkExternalFenceFeatureFlags features = 0;
   if (handle_type & export)
      features |= VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
   if (handle_type & import)
      features |= VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;

   pExternalFenceProperties->exportFromImportedHandleTypes = export;
   pExternalFenceProperties->compatibleHandleTypes = compatible;
   pExternalFenceProperties->externalFenceFeatures = features;
}

#ifndef _WIN32

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_ImportFenceFdKHR(VkDevice _device,
                           const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(vk_fence, fence, pImportFenceFdInfo->fence);

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   const int fd = pImportFenceFdInfo->fd;
   const VkExternalFenceHandleTypeFlagBits handle_type =
      pImportFenceFdInfo->handleType;

   struct vk_sync *temporary = NULL, *sync;
   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      const struct vk_sync_type *sync_type =
         get_fence_sync_type(device->physical, handle_type);

      VkResult result = vk_sync_create(device, sync_type, 0 /* flags */,
                                       0 /* initial_value */, &temporary);
      if (result != VK_SUCCESS)
         return result;

      sync = temporary;
   } else {
      sync = &fence->permanent;
   }
   assert(handle_type & vk_sync_fence_handle_types(sync->type));

   VkResult result;
   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      result = vk_sync_import_opaque_fd(device, sync, fd);
      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      result = vk_sync_import_sync_file(device, sync, fd);
      break;

   default:
      result = vk_error(fence, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (result != VK_SUCCESS) {
      if (temporary != NULL)
         vk_sync_destroy(device, temporary);
      return result;
   }

   /* From the Vulkan 1.2.194 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   if (fd != -1)
      close(fd);

   if (temporary) {
      vk_fence_reset_temporary(device, fence);
      fence->temporary = temporary;
   }

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_GetFenceFdKHR(VkDevice _device,
                        const VkFenceGetFdInfoKHR *pGetFdInfo,
                        int *pFd)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(vk_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct vk_sync *sync = vk_fence_get_active_sync(fence);

   VkResult result;
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      result = vk_sync_export_opaque_fd(device, sync, pFd);
      if (unlikely(result != VK_SUCCESS))
         return result;
      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* There's no direct spec quote for this but the same rules as for
       * semaphore export apply. We can't export a sync file from a fence
       * if the fence event hasn't been submitted to the kernel yet.
       */
      if (vk_device_supports_threaded_submit(device)) {
         result = vk_sync_wait(device, sync, 0,
                               VK_SYNC_WAIT_PENDING,
                               UINT64_MAX);
         if (unlikely(result != VK_SUCCESS))
            return result;
      }

      result = vk_sync_export_sync_file(device, sync, pFd);
      if (unlikely(result != VK_SUCCESS))
         return result;

      /* From the Vulkan 1.2.194 spec:
       *
       *    "Export operations have the same transference as the specified
       *    handle type’s import operations. Additionally, exporting a fence
       *    payload to a handle with copy transference has the same side
       *    effects on the source fence’s payload as executing a fence reset
       *    operation."
       *
       * In other words, exporting a sync file also resets the fence. We
       * only care about this for the permanent payload because the temporary
       * payload will be destroyed below.
       */
      if (sync == &fence->permanent) {
         result = vk_sync_reset(device, sync);
         if (unlikely(result != VK_SUCCESS))
            return result;
      }
      break;

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.2.194 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   vk_fence_reset_temporary(device, fence);

   return VK_SUCCESS;
}

#endif /* !defined(_WIN32) */