/*
 * Copyright © 2015 Intel Corporation
 * Copyright © 2022 Collabora, Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24
25 #include "vk_command_pool.h"
26
27 #include "vk_alloc.h"
28 #include "vk_command_buffer.h"
29 #include "vk_common_entrypoints.h"
30 #include "vk_device.h"
31 #include "vk_log.h"
32
33 static bool
should_recycle_command_buffers(struct vk_device * device)34 should_recycle_command_buffers(struct vk_device *device)
35 {
36 /* They have to be using the common allocation implementation, otherwise
37 * the recycled command buffers will never actually get re-used
38 */
39 const struct vk_device_dispatch_table *disp = &device->dispatch_table;
40 if (disp->AllocateCommandBuffers != vk_common_AllocateCommandBuffers)
41 return false;
42
43 /* We need to be able to reset command buffers */
44 if (device->command_buffer_ops->reset == NULL)
45 return false;
46
47 return true;
48 }
49
50 VkResult MUST_CHECK
vk_command_pool_init(struct vk_device * device,struct vk_command_pool * pool,const VkCommandPoolCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator)51 vk_command_pool_init(struct vk_device *device,
52 struct vk_command_pool *pool,
53 const VkCommandPoolCreateInfo *pCreateInfo,
54 const VkAllocationCallbacks *pAllocator)
55 {
56 memset(pool, 0, sizeof(*pool));
57 vk_object_base_init(device, &pool->base,
58 VK_OBJECT_TYPE_COMMAND_POOL);
59
60 pool->flags = pCreateInfo->flags;
61 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
62 pool->alloc = pAllocator ? *pAllocator : device->alloc;
63 pool->command_buffer_ops = device->command_buffer_ops;
64 pool->recycle_command_buffers = should_recycle_command_buffers(device);
65 list_inithead(&pool->command_buffers);
66 list_inithead(&pool->free_command_buffers);
67
68 return VK_SUCCESS;
69 }
70
71 void
vk_command_pool_finish(struct vk_command_pool * pool)72 vk_command_pool_finish(struct vk_command_pool *pool)
73 {
74 list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
75 &pool->command_buffers, pool_link) {
76 cmd_buffer->ops->destroy(cmd_buffer);
77 }
78 assert(list_is_empty(&pool->command_buffers));
79
80 list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
81 &pool->free_command_buffers, pool_link) {
82 cmd_buffer->ops->destroy(cmd_buffer);
83 }
84 assert(list_is_empty(&pool->free_command_buffers));
85
86 vk_object_base_finish(&pool->base);
87 }
88
89 VKAPI_ATTR VkResult VKAPI_CALL
vk_common_CreateCommandPool(VkDevice _device,const VkCommandPoolCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkCommandPool * pCommandPool)90 vk_common_CreateCommandPool(VkDevice _device,
91 const VkCommandPoolCreateInfo *pCreateInfo,
92 const VkAllocationCallbacks *pAllocator,
93 VkCommandPool *pCommandPool)
94 {
95 VK_FROM_HANDLE(vk_device, device, _device);
96 struct vk_command_pool *pool;
97 VkResult result;
98
99 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
100 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
101 if (pool == NULL)
102 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
103
104 result = vk_command_pool_init(device, pool, pCreateInfo, pAllocator);
105 if (unlikely(result != VK_SUCCESS)) {
106 vk_free2(&device->alloc, pAllocator, pool);
107 return result;
108 }
109
110 *pCommandPool = vk_command_pool_to_handle(pool);
111
112 return VK_SUCCESS;
113 }
114
115 VKAPI_ATTR void VKAPI_CALL
vk_common_DestroyCommandPool(VkDevice _device,VkCommandPool commandPool,const VkAllocationCallbacks * pAllocator)116 vk_common_DestroyCommandPool(VkDevice _device,
117 VkCommandPool commandPool,
118 const VkAllocationCallbacks *pAllocator)
119 {
120 VK_FROM_HANDLE(vk_device, device, _device);
121 VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
122
123 if (pool == NULL)
124 return;
125
126 vk_command_pool_finish(pool);
127 vk_free2(&device->alloc, pAllocator, pool);
128 }
129
130 VKAPI_ATTR VkResult VKAPI_CALL
vk_common_ResetCommandPool(VkDevice device,VkCommandPool commandPool,VkCommandPoolResetFlags flags)131 vk_common_ResetCommandPool(VkDevice device,
132 VkCommandPool commandPool,
133 VkCommandPoolResetFlags flags)
134 {
135 VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
136 const struct vk_device_dispatch_table *disp =
137 &pool->base.device->dispatch_table;
138
139 #define COPY_FLAG(flag) \
140 if (flags & VK_COMMAND_POOL_RESET_##flag) \
141 cb_flags |= VK_COMMAND_BUFFER_RESET_##flag
142
143 VkCommandBufferResetFlags cb_flags = 0;
144 COPY_FLAG(RELEASE_RESOURCES_BIT);
145
146 #undef COPY_FLAG
147
148 list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
149 &pool->command_buffers, pool_link) {
150 VkResult result =
151 disp->ResetCommandBuffer(vk_command_buffer_to_handle(cmd_buffer),
152 cb_flags);
153 if (result != VK_SUCCESS)
154 return result;
155 }
156
157 return VK_SUCCESS;
158 }
159
160 static void
vk_command_buffer_recycle_or_destroy(struct vk_command_pool * pool,struct vk_command_buffer * cmd_buffer)161 vk_command_buffer_recycle_or_destroy(struct vk_command_pool *pool,
162 struct vk_command_buffer *cmd_buffer)
163 {
164 assert(pool == cmd_buffer->pool);
165
166 if (pool->recycle_command_buffers) {
167 vk_command_buffer_recycle(cmd_buffer);
168
169 list_del(&cmd_buffer->pool_link);
170 list_add(&cmd_buffer->pool_link, &pool->free_command_buffers);
171 } else {
172 cmd_buffer->ops->destroy(cmd_buffer);
173 }
174 }
175
176 static struct vk_command_buffer *
vk_command_pool_find_free(struct vk_command_pool * pool)177 vk_command_pool_find_free(struct vk_command_pool *pool)
178 {
179 if (list_is_empty(&pool->free_command_buffers))
180 return NULL;
181
182 struct vk_command_buffer *cmd_buffer =
183 list_first_entry(&pool->free_command_buffers,
184 struct vk_command_buffer, pool_link);
185
186 list_del(&cmd_buffer->pool_link);
187 list_addtail(&cmd_buffer->pool_link, &pool->command_buffers);
188
189 return cmd_buffer;
190 }
191
192 VKAPI_ATTR VkResult VKAPI_CALL
vk_common_AllocateCommandBuffers(VkDevice device,const VkCommandBufferAllocateInfo * pAllocateInfo,VkCommandBuffer * pCommandBuffers)193 vk_common_AllocateCommandBuffers(VkDevice device,
194 const VkCommandBufferAllocateInfo *pAllocateInfo,
195 VkCommandBuffer *pCommandBuffers)
196 {
197 VK_FROM_HANDLE(vk_command_pool, pool, pAllocateInfo->commandPool);
198 VkResult result;
199 uint32_t i;
200
201 assert(device == vk_device_to_handle(pool->base.device));
202
203 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
204 struct vk_command_buffer *cmd_buffer = vk_command_pool_find_free(pool);
205 if (cmd_buffer == NULL) {
206 result = pool->command_buffer_ops->create(pool, &cmd_buffer);
207 if (unlikely(result != VK_SUCCESS))
208 goto fail;
209 }
210
211 cmd_buffer->level = pAllocateInfo->level;
212
213 pCommandBuffers[i] = vk_command_buffer_to_handle(cmd_buffer);
214 }
215
216 return VK_SUCCESS;
217
218 fail:
219 while (i--) {
220 VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);
221 vk_command_buffer_recycle_or_destroy(pool, cmd_buffer);
222 }
223 for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
224 pCommandBuffers[i] = VK_NULL_HANDLE;
225
226 return result;
227 }
228
229 VKAPI_ATTR void VKAPI_CALL
vk_common_FreeCommandBuffers(VkDevice device,VkCommandPool commandPool,uint32_t commandBufferCount,const VkCommandBuffer * pCommandBuffers)230 vk_common_FreeCommandBuffers(VkDevice device,
231 VkCommandPool commandPool,
232 uint32_t commandBufferCount,
233 const VkCommandBuffer *pCommandBuffers)
234 {
235 VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
236
237 for (uint32_t i = 0; i < commandBufferCount; i++) {
238 VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);
239
240 if (cmd_buffer == NULL)
241 continue;
242
243 vk_command_buffer_recycle_or_destroy(pool, cmd_buffer);
244 }
245 }
246
247 void
vk_command_pool_trim(struct vk_command_pool * pool,VkCommandPoolTrimFlags flags)248 vk_command_pool_trim(struct vk_command_pool *pool,
249 VkCommandPoolTrimFlags flags)
250 {
251 list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
252 &pool->free_command_buffers, pool_link) {
253 cmd_buffer->ops->destroy(cmd_buffer);
254 }
255 assert(list_is_empty(&pool->free_command_buffers));
256 }
257
258 VKAPI_ATTR void VKAPI_CALL
vk_common_TrimCommandPool(VkDevice device,VkCommandPool commandPool,VkCommandPoolTrimFlags flags)259 vk_common_TrimCommandPool(VkDevice device,
260 VkCommandPool commandPool,
261 VkCommandPoolTrimFlags flags)
262 {
263 VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
264
265 vk_command_pool_trim(pool, flags);
266 }
267