1 // Copyright (C) 2023 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include <assert.h>
16 #include <fcntl.h>
17 #include <lib/magma/magma_common_defs.h>
18 #include <stdarg.h>
19 #include <stdint.h>
20 #include <string.h>
21 #include <sys/mman.h>
22 #include <unistd.h>
23 #include <virtgpu_drm.h>
24 #include <xf86drm.h>
25 
26 #include <limits>
27 #include <mutex>
28 #include <thread>
29 #include <unordered_map>
30 
31 #include "VirtioGpuAddressSpaceStream.h"
32 #include "EncoderDebug.h"
33 #include "magma_enc.h"
34 
// Reads the monotonic clock and returns the time in nanoseconds.
// When |raw| is true, CLOCK_MONOTONIC_RAW is used instead of CLOCK_MONOTONIC.
// Returns 0 if the clock read fails.
static uint64_t get_ns_monotonic(bool raw) {
    struct timespec ts;
    const clockid_t clock_id = raw ? CLOCK_MONOTONIC_RAW : CLOCK_MONOTONIC;
    if (clock_gettime(clock_id, &ts) < 0) {
        return 0;
    }
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000ULL + ts.tv_nsec;
}
41 
// Guest-side Magma client context. Wraps the generated encoder context and
// overrides the subset of entry points that need guest-side handling:
// fd/blob translation for buffer handles, mirrored buffer bookkeeping, and
// cooperative (lock-dropping) polling.
class MagmaClientContext : public magma_encoder_context_t {
   public:
    MagmaClientContext(AddressSpaceStream* stream);

    // Returns the stream passed at construction, as its concrete type.
    AddressSpaceStream* stream() {
        return reinterpret_cast<AddressSpaceStream*>(magma_encoder_context_t::m_stream);
    }

    // Creates and exports a virtio-gpu blob fd for a buffer previously
    // recorded in buffer_info_.
    magma_status_t get_fd_for_buffer(magma_buffer_t buffer, int* fd_out);

    // Mutex serializing all access to the singleton context.
    std::mutex& mutex() { return m_mutex_; }

    // Overridden entry points installed by the constructor; |self| is the
    // MagmaClientContext instance.
    static magma_status_t magma_device_import(void* self, magma_handle_t device_channel,
                                              magma_device_t* device_out);
    static magma_status_t magma_device_query(void* self, magma_device_t device, uint64_t id,
                                             magma_handle_t* handle_out, uint64_t* value_out);
    static magma_status_t magma_buffer_get_handle(void* self, magma_buffer_t buffer,
                                                  magma_handle_t* handle_out);
    static magma_status_t magma_buffer_export(void* self, magma_buffer_t buffer,
                                              magma_handle_t* handle_out);
    static magma_status_t magma_poll(void* self, magma_poll_item_t* items, uint32_t count,
                                     uint64_t timeout_ns);
    static magma_status_t magma_connection_create_buffer(void* self, magma_connection_t connection,
                                                         uint64_t size, uint64_t* size_out,
                                                         magma_buffer_t* buffer_out,
                                                         magma_buffer_id_t* id_out);
    static void magma_connection_release_buffer(void* self, magma_connection_t connection,
                                                magma_buffer_t buffer);

    // Records the lock held by the current thread so long-running decoders
    // (e.g. magma_poll) can temporarily release it.
    static void set_thread_local_context_lock(std::unique_lock<std::mutex>* lock) { t_lock = lock; }

    static std::unique_lock<std::mutex>* get_thread_local_context_lock() { return t_lock; }

    // Saved encoder entry points, captured before the overrides above are
    // installed, so the overrides can delegate to them.
    magma_device_import_client_proc_t magma_device_import_enc_;
    magma_buffer_get_handle_client_proc_t magma_buffer_get_handle_enc_;
    magma_poll_client_proc_t magma_poll_enc_;
    magma_connection_create_buffer_client_proc_t magma_connection_create_buffer_enc_;
    magma_connection_release_buffer_client_proc_t magma_connection_release_buffer_enc_;

    int render_node_fd_;

    // Stores buffer info upon creation.
    struct BufferInfo {
        magma_connection_t connection;  // Owning connection.
        uint64_t size;                  // Actual size.
        magma_buffer_id_t id;           // Id.
    };
    std::unordered_map<magma_buffer_t, BufferInfo> buffer_info_;

    std::mutex m_mutex_;
    static thread_local std::unique_lock<std::mutex>* t_lock;
};
94 
// This makes the mutex lock available to decoding methods that can take time
// (eg magma_poll), to prevent one thread from locking out others.
// RAII: acquires the context mutex on construction and publishes the lock via
// the thread-local pointer; clears the pointer on destruction (the unique_lock
// member then releases the mutex if it still owns it).
class ContextLock {
   public:
    ContextLock(MagmaClientContext* context) : m_context_(context), m_lock_(context->mutex()) {
        m_context_->set_thread_local_context_lock(&m_lock_);
    }

    ~ContextLock() { m_context_->set_thread_local_context_lock(nullptr); }

   private:
    MagmaClientContext* m_context_;
    std::unique_lock<std::mutex> m_lock_;
};
109 
// static
// Definition of the per-thread lock pointer declared in MagmaClientContext.
thread_local std::unique_lock<std::mutex>* MagmaClientContext::t_lock;
112 
MagmaClientContext(AddressSpaceStream * stream)113 MagmaClientContext::MagmaClientContext(AddressSpaceStream* stream)
114     : magma_encoder_context_t(stream, new gfxstream::guest::ChecksumCalculator) {
115     magma_device_import_enc_ = magma_client_context_t::magma_device_import;
116     magma_buffer_get_handle_enc_ = magma_client_context_t::magma_buffer_get_handle;
117     magma_poll_enc_ = magma_client_context_t::magma_poll;
118     magma_connection_create_buffer_enc_ = magma_client_context_t::magma_connection_create_buffer;
119 
120     magma_client_context_t::magma_device_import = &MagmaClientContext::magma_device_import;
121     magma_client_context_t::magma_device_query = &MagmaClientContext::magma_device_query;
122     magma_client_context_t::magma_buffer_get_handle = &MagmaClientContext::magma_buffer_get_handle;
123     magma_client_context_t::magma_buffer_export = &MagmaClientContext::magma_buffer_export;
124     magma_client_context_t::magma_poll = &MagmaClientContext::magma_poll;
125     magma_client_context_t::magma_connection_create_buffer =
126         &MagmaClientContext::magma_connection_create_buffer;
127     magma_client_context_t::magma_connection_release_buffer =
128         &MagmaClientContext::magma_connection_release_buffer;
129 }
130 
131 // static
magma_device_import(void * self,magma_handle_t device_channel,magma_device_t * device_out)132 magma_status_t MagmaClientContext::magma_device_import(void* self, magma_handle_t device_channel,
133                                                        magma_device_t* device_out) {
134     auto context = reinterpret_cast<MagmaClientContext*>(self);
135 
136     magma_handle_t placeholder = 0xacbd1234;  // not used
137 
138     magma_status_t status = context->magma_device_import_enc_(self, placeholder, device_out);
139 
140     // The local fd isn't needed, just close it.
141     int fd = device_channel;
142     close(fd);
143 
144     return status;
145 }
146 
get_fd_for_buffer(magma_buffer_t buffer,int * fd_out)147 magma_status_t MagmaClientContext::get_fd_for_buffer(magma_buffer_t buffer, int* fd_out) {
148     *fd_out = -1;
149 
150     auto it = buffer_info_.find(buffer);
151     if (it == buffer_info_.end()) {
152         ALOGE("%s: buffer (%lu) not found in map", __func__, buffer);
153         return MAGMA_STATUS_INVALID_ARGS;
154     }
155     auto& info = it->second;
156 
157     // TODO(fxbug.dev/42073573): Evaluate deferred guest resource creation.
158     auto blob = VirtGpuDevice::getInstance(VirtGpuCapset::kCapsetGfxStreamMagma)
159                     ->createBlob({.size = info.size,
160                                   .flags = kBlobFlagMappable | kBlobFlagShareable,
161                                   .blobMem = kBlobMemHost3d,
162                                   .blobId = info.id});
163     if (!blob) {
164         return MAGMA_STATUS_INTERNAL_ERROR;
165     }
166 
167     VirtGpuExternalHandle handle{};
168     int result = blob->exportBlob(handle);
169     if (result != 0 || handle.osHandle < 0) {
170         return MAGMA_STATUS_INTERNAL_ERROR;
171     }
172 
173     *fd_out = handle.osHandle;
174 
175     return MAGMA_STATUS_OK;
176 }
177 
magma_device_query(void * self,magma_device_t device,uint64_t id,magma_handle_t * handle_out,uint64_t * value_out)178 magma_status_t MagmaClientContext::magma_device_query(void* self, magma_device_t device,
179                                                       uint64_t id, magma_handle_t* handle_out,
180                                                       uint64_t* value_out) {
181     auto context = reinterpret_cast<MagmaClientContext*>(self);
182 
183     // TODO(b/277219980): Support guest-allocated buffers.
184     constexpr magma_bool_t kHostAllocate = 1;
185 
186     uint64_t value = 0;
187     uint64_t result_buffer_mapping_id = 0;
188     uint64_t result_buffer_size = 0;
189     magma_status_t status = context->magma_device_query_fudge(
190         self, device, id, kHostAllocate, &result_buffer_mapping_id, &result_buffer_size, &value);
191     if (status != MAGMA_STATUS_OK) {
192         ALOGE("magma_device_query_fudge failed: %d\n", status);
193         return status;
194     }
195 
196     // For non-buffer queries, just return the value.
197     if (result_buffer_size == 0) {
198         if (!value_out) {
199             ALOGE("MAGMA_STATUS_INVALID_ARGS\n");
200             return MAGMA_STATUS_INVALID_ARGS;
201         }
202         *value_out = value;
203         ALOGE("MAGMA_STATUS_OK (value = %lu)\n", value);
204         return MAGMA_STATUS_OK;
205     }
206 
207     // Otherwise, create and return a fd for the host-allocated buffer.
208     if (!handle_out) {
209         ALOGE("MAGMA_STATUS_INVALID_ARGS\n");
210         return MAGMA_STATUS_INVALID_ARGS;
211     }
212 
213     ALOGI("opening blob id %lu size %lu\n", result_buffer_mapping_id, result_buffer_size);
214     auto blob = VirtGpuDevice::getInstance(VirtGpuCapset::kCapsetGfxStreamMagma)
215                     ->createBlob({.size = result_buffer_size,
216                                   .flags = kBlobFlagMappable | kBlobFlagShareable,
217                                   .blobMem = kBlobMemHost3d,
218                                   .blobId = result_buffer_mapping_id});
219     if (!blob) {
220         ALOGE("VirtGpuDevice::createBlob failed\n");
221         return MAGMA_STATUS_INTERNAL_ERROR;
222     }
223 
224     VirtGpuExternalHandle handle{};
225     int result = blob->exportBlob(handle);
226     if (result != 0 || handle.osHandle < 0) {
227         ALOGE("VirtGpuResource::exportBlob failed\n");
228         return MAGMA_STATUS_INTERNAL_ERROR;
229     }
230 
231     *handle_out = handle.osHandle;
232     return MAGMA_STATUS_OK;
233 }
234 
magma_buffer_get_handle(void * self,magma_buffer_t buffer,magma_handle_t * handle_out)235 magma_status_t MagmaClientContext::magma_buffer_get_handle(void* self, magma_buffer_t buffer,
236                                                            magma_handle_t* handle_out) {
237     auto context = reinterpret_cast<MagmaClientContext*>(self);
238     magma_buffer_info_t info{};
239     magma_status_t status = context->magma_buffer_get_info(self, buffer, &info);
240     if (status != MAGMA_STATUS_OK) return status;
241     magma_handle_t mapping_id = 0;
242     status = context->magma_buffer_get_handle_enc_(self, buffer, &mapping_id);
243     if (status != MAGMA_STATUS_OK) return status;
244     auto blob = VirtGpuDevice::getInstance(VirtGpuCapset::kCapsetGfxStreamMagma)
245                     ->createBlob({.size = info.size,
246                                   .flags = kBlobFlagMappable | kBlobFlagShareable,
247                                   .blobMem = kBlobMemHost3d,
248                                   .blobId = mapping_id});
249     if (!blob) {
250         return MAGMA_STATUS_INTERNAL_ERROR;
251     }
252 
253     VirtGpuExternalHandle handle{};
254     int result = blob->exportBlob(handle);
255     if (result != 0 || handle.osHandle < 0) {
256         return MAGMA_STATUS_INTERNAL_ERROR;
257     }
258     *handle_out = handle.osHandle;
259     return MAGMA_STATUS_OK;
260 }
261 
magma_buffer_export(void * self,magma_buffer_t buffer,magma_handle_t * handle_out)262 magma_status_t MagmaClientContext::magma_buffer_export(void* self, magma_buffer_t buffer,
263                                                        magma_handle_t* handle_out) {
264     auto context = reinterpret_cast<MagmaClientContext*>(self);
265 
266     int fd;
267     magma_status_t status = context->get_fd_for_buffer(buffer, &fd);
268     if (status != MAGMA_STATUS_OK) return status;
269 
270     *handle_out = fd;
271 
272     return MAGMA_STATUS_OK;
273 }
274 
// We can't pass a non-zero timeout to the server, as that would block the server from handling
// requests from other threads. So we busy wait here, which isn't ideal; however if the server did
// block, gfxstream would busy wait for the response anyway.
// static
magma_status_t MagmaClientContext::magma_poll(void* self, magma_poll_item_t* items, uint32_t count,
                                              uint64_t timeout_ns) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    int64_t time_start = static_cast<int64_t>(get_ns_monotonic(false));

    // Absolute deadline; clamped to int64 max when the addition wraps.
    // NOTE(review): the signed addition can overflow (technically UB); the
    // check below catches the wrapped case in practice.
    int64_t abs_timeout_ns = time_start + timeout_ns;

    if (abs_timeout_ns < time_start) {
        abs_timeout_ns = std::numeric_limits<int64_t>::max();
    }

    bool warned_for_long_poll = false;

    // Busy-wait: poll the host with a zero timeout, dropping the context lock
    // between attempts so other threads can encode.
    while (true) {
        magma_status_t status = context->magma_poll_enc_(self, items, count, 0);

        // Any result other than TIMED_OUT (ready or error) is returned with
        // the lock held, as the caller's ContextLock expects.
        if (status != MAGMA_STATUS_TIMED_OUT) return status;

        // Not ready, allow other threads to work in with us
        get_thread_local_context_lock()->unlock();

        std::this_thread::yield();

        int64_t time_now = static_cast<int64_t>(get_ns_monotonic(false));

        // TODO(fxb/122604): Add back-off to the busy loop, ideally based on recent sleep
        // patterns (e.g. start polling shortly before next expected burst).
        if (!warned_for_long_poll && time_now - time_start > 5000000000) {
            ALOGE("magma_poll: long poll detected (%lu us)", (time_now - time_start) / 1000);
            warned_for_long_poll = true;
        }

        // Deadline reached: exit with the lock released; the unique_lock in
        // ContextLock no longer owns the mutex, so its destructor is a no-op.
        if (time_now >= abs_timeout_ns) break;

        get_thread_local_context_lock()->lock();
    }

    return MAGMA_STATUS_TIMED_OUT;
}
318 
319 // Magma 1.0 no longer tracks buffer size and id on behalf of the client, so we mirror it here.
magma_connection_create_buffer(void * self,magma_connection_t connection,uint64_t size,uint64_t * size_out,magma_buffer_t * buffer_out,magma_buffer_id_t * id_out)320 magma_status_t MagmaClientContext::magma_connection_create_buffer(void* self,
321                                                                   magma_connection_t connection,
322                                                                   uint64_t size, uint64_t* size_out,
323                                                                   magma_buffer_t* buffer_out,
324                                                                   magma_buffer_id_t* id_out) {
325     auto context = reinterpret_cast<MagmaClientContext*>(self);
326 
327     // TODO(b/277219980): support guest-allocated buffers
328     magma_status_t status = context->magma_connection_create_buffer_enc_(
329         self, connection, size, size_out, buffer_out, id_out);
330     if (status != MAGMA_STATUS_OK) return status;
331 
332     auto [_, inserted] = context->buffer_info_.emplace(
333         *buffer_out, BufferInfo{.connection = connection, .size = *size_out, .id = *id_out});
334     if (!inserted) {
335         ALOGE("magma_connection_create_buffer: duplicate entry in buffer info map");
336         return MAGMA_STATUS_INTERNAL_ERROR;
337     }
338 
339     return MAGMA_STATUS_OK;
340 }
341 
magma_connection_release_buffer(void * self,magma_connection_t connection,magma_buffer_t buffer)342 void MagmaClientContext::magma_connection_release_buffer(void* self, magma_connection_t connection,
343                                                          magma_buffer_t buffer) {
344     auto context = reinterpret_cast<MagmaClientContext*>(self);
345 
346     context->magma_connection_release_buffer_enc_(self, connection, buffer);
347 
348     // Invalid buffer or connection is treated as no-op by magma, so only log as verbose.
349     auto it = context->buffer_info_.find(buffer);
350     if (it == context->buffer_info_.end()) {
351         ALOGV("magma_connection_release_buffer: buffer (%lu) not found in map", buffer);
352         return;
353     }
354     if (it->second.connection != connection) {
355         ALOGV(
356             "magma_connection_release_buffer: buffer (%lu) attempted release using wrong "
357             "connection (expected %lu, received %lu)",
358             buffer, it->second.connection, connection);
359         return;
360     }
361     context->buffer_info_.erase(it);
362 }
363 
// Casts |value| to T, aborting if the value is outside T's representable
// range. Comparisons are performed sign-correctly: the original code compared
// an unsigned |value| against numeric_limits<T>::min() directly, which for a
// signed T promoted the negative limit to a huge unsigned value and aborted on
// perfectly valid inputs (e.g. SafeCast<int>(uint64_t{42})).
template <typename T, typename U>
static T SafeCast(const U& value) {
    static_assert(std::is_integral_v<T> && std::is_integral_v<U>,
                  "SafeCast only supports integral types");
    if constexpr (std::is_signed_v<T> == std::is_signed_v<U>) {
        // Same signedness: direct comparisons are safe.
        if (value > std::numeric_limits<T>::max() || value < std::numeric_limits<T>::min()) {
            abort();
        }
    } else if constexpr (std::is_signed_v<U>) {
        // Signed -> unsigned: reject negatives, then compare magnitudes unsigned.
        if (value < 0 ||
            static_cast<std::make_unsigned_t<U>>(value) > std::numeric_limits<T>::max()) {
            abort();
        }
    } else {
        // Unsigned -> signed: compare against T's max as an unsigned value.
        if (value > static_cast<std::make_unsigned_t<T>>(std::numeric_limits<T>::max())) {
            abort();
        }
    }
    return static_cast<T>(value);
}
371 
372 // We have a singleton client context for all threads.  We want all client
373 // threads served by a single server RenderThread.
GetMagmaContext()374 MagmaClientContext* GetMagmaContext() {
375     static MagmaClientContext* s_context;
376     static std::once_flag once_flag;
377 
378     std::call_once(once_flag, []() {
379         auto stream = createVirtioGpuAddressSpaceStream(kCapsetGfxStreamMagma, nullptr);
380         assert(stream);
381 
382         // RenderThread expects flags: send zero 'clientFlags' to the host.
383         {
384             auto pClientFlags =
385                 reinterpret_cast<unsigned int*>(stream->allocBuffer(sizeof(unsigned int)));
386             *pClientFlags = 0;
387             stream->commitBuffer(sizeof(unsigned int));
388         }
389 
390         s_context = new MagmaClientContext(stream);
391         auto render_node_fd =
392             VirtGpuDevice::getInstance(VirtGpuCapset::kCapsetGfxStreamMagma)->getDeviceHandle();
393         s_context->render_node_fd_ = SafeCast<int>(render_node_fd);
394 
395         ALOGE("Created new context\n");
396         fflush(stdout);
397     });
398 
399     return s_context;
400 }
401 
// Used in magma_entry.cpp
// Always lock around the encoding methods because we have a singleton context.
#define GET_CONTEXT                              \
    MagmaClientContext* ctx = GetMagmaContext(); \
    ContextLock lock(ctx)

// The generated entry points are compiled into this translation unit so they
// can use the GET_CONTEXT macro above.
#include "magma_entry.cpp"
409