1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <cutils/ashmem.h>
17 #include <dlfcn.h>
18 #include <errno.h>
19 #include <gralloc_cb_bp.h>
20 #include <hardware/gralloc.h>
21 #include <limits.h>
22 #include <pthread.h>
23 #include <qemu_pipe_bp.h>
24 #include <string.h>
25 #include <sys/mman.h>
26 #include <unistd.h>
27
28 #include "FormatConversions.h"
29 #include "HostConnection.h"
30 #include "ProcessPipe.h"
31 #include "ThreadInfo.h"
32 #include "aemu/base/threads/AndroidThread.h"
33 #include "glUtils.h"
34 #include "goldfish_address_space.h"
35 #include "gralloc_common.h"
36
37 #if PLATFORM_SDK_VERSION < 26
38 #include <cutils/log.h>
39 #else
40 #include <log/log.h>
41 #endif
42 #include <cutils/properties.h>
43
44 #include <set>
45 #include <map>
46 #include <vector>
47 #include <string>
48 #include <sstream>
49
50 /* Set to 1 or 2 to enable debug traces */
51 #define DEBUG 0
52
53 #ifndef D
54
55 #if DEBUG >= 1
56 # define D(...) ALOGD(__VA_ARGS__)
57 #else
58 # define D(...) ((void)0)
59 #endif
60
61 #endif
62
63 #if DEBUG >= 2
64 # define DD(...) ALOGD(__VA_ARGS__)
65 #else
66 # define DD(...) ((void)0)
67 #endif
68
69 #define DBG_FUNC DBG("%s\n", __FUNCTION__)
70
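// Size in bytes of one counter slot reserved at the start of the ashmem
// region (see getAshmemColorOffset() and getOpenCountPtr() below).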
71 #define GOLDFISH_OFFSET_UNIT 8
72
73 #define OMX_COLOR_FormatYUV420Planar 19
74
75 #ifdef GOLDFISH_HIDL_GRALLOC
76 static const bool isHidlGralloc = true;
77 #else
78 static const bool isHidlGralloc = false;
79 #endif
80
81 using gfxstream::guest::getCurrentThreadId;
82
83 const uint32_t CB_HANDLE_MAGIC_OLD = CB_HANDLE_MAGIC_BASE | 0x1;
84 const int kBufferFdIndex = 0;
85 const int kHostHandleRefCountIndex = 1;
86
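// Legacy ("old") color-buffer handle: extends cb_handle_t with the usage
// bits, dimensions, GL format/type, the pid that mapped the ashmem region,
// the guest pointer to that mapping (split into lo/hi words), and the
// rectangle locked for s/w access.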
87 struct cb_handle_old_t : public cb_handle_t {
88 cb_handle_old_t(int p_fd, int p_ashmemSize, int p_usage,
89 int p_width, int p_height,
90 int p_format, int p_glFormat, int p_glType)
91 : cb_handle_t(CB_HANDLE_MAGIC_OLD,
92 0,
93 p_format,
94 p_width,
95 p_ashmemSize,
96 ~uint64_t(0)),
97 usage(p_usage),
98 width(p_width),
99 height(p_height),
100 glFormat(p_glFormat),
101 glType(p_glType),
102 ashmemBasePid(0),
103 mappedPid(0),
104 bufferPtrLo(0),
105 bufferPtrHi(0),
106 lockedLeft(0),
107 lockedTop(0),
108 lockedWidth(0),
109 lockedHeight(0) {
110 fds[kBufferFdIndex] = p_fd;
111 numFds = 1;
112 numInts = CB_HANDLE_NUM_INTS(numFds);
113 }
114
115 bool hasRefcountPipe() const {
116 return qemu_pipe_valid(fds[kHostHandleRefCountIndex]);
117 }
118
119 void setRefcountPipeFd(QEMU_PIPE_HANDLE fd) {
120 if (qemu_pipe_valid(fd)) {
121 numFds++;
122 }
123 fds[kHostHandleRefCountIndex] = fd;
124 numInts = CB_HANDLE_NUM_INTS(numFds);
125 }
126
127 bool canBePosted() const {
128 return (0 != (usage & GRALLOC_USAGE_HW_FB));
129 }
130
131 void* getBufferPtr() const {
132 const uint64_t addr = (uint64_t(bufferPtrHi) << 32) | bufferPtrLo;
133 return reinterpret_cast<void*>(static_cast<uintptr_t>(addr));
134 }
135
136 void setBufferPtr(void* ptr) {
137 const uint64_t addr = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
138 bufferPtrLo = uint32_t(addr);
139 bufferPtrHi = uint32_t(addr >> 32);
140 }
141
142 bool isValid() const {
143 return (version == sizeof(native_handle)) && (magic == CB_HANDLE_MAGIC_OLD);
144 }
145
146 static cb_handle_old_t* from(void* p) {
147 if (!p) { return NULL; }
148 cb_handle_old_t* cb = static_cast<cb_handle_old_t*>(p);
149 return cb->isValid() ? cb : NULL;
150 }
151
152 static const cb_handle_old_t* from(const void* p) {
153 return from(const_cast<void*>(p));
154 }
155
156 static cb_handle_old_t* from_unconst(const void* p) {
157 return from(const_cast<void*>(p));
158 }
159
160 uint32_t usage; // usage bits the buffer was created with
161 uint32_t width; // buffer width
162 uint32_t height; // buffer height
163 uint32_t glFormat; // OpenGL format enum used for host h/w color buffer
164 uint32_t glType; // OpenGL type enum used when uploading to host
165 int32_t ashmemBasePid; // process id which mapped the ashmem region
166 int32_t mappedPid; // process id which succeeded gralloc_register call
167 uint32_t bufferPtrLo;
168 uint32_t bufferPtrHi;
169 uint32_t lockedLeft; // region of buffer locked for s/w write
170 uint32_t lockedTop;
171 uint32_t lockedWidth;
172 uint32_t lockedHeight;
173 };
174
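// The ashmem region starts with a small header of GOLDFISH_OFFSET_UNIT-sized
// slots (a postCounter slot, plus an openCounter slot when HIDL gralloc is
// used); pixel data begins at getAshmemColorOffset(). getOpenCountPtr()
// points at the int32_t used to balance rcOpenColorBuffer/rcCloseColorBuffer
// calls across processes.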
175 int32_t* getOpenCountPtr(const cb_handle_old_t* cb) {
176 return ((int32_t*)cb->getBufferPtr()) + 1;
177 }
178
179 uint32_t getAshmemColorOffset(cb_handle_old_t* cb) {
180 uint32_t res = 0;
181 if (cb->canBePosted()) res = GOLDFISH_OFFSET_UNIT;
182 if (isHidlGralloc) res = GOLDFISH_OFFSET_UNIT * 2;
183 return res;
184 }
185
186 //
187 // our private gralloc module structure
188 //
189 struct private_module_t {
190 gralloc_module_t base;
191 };
192
193 /* If not NULL, this is a pointer to the fallback module.
194 * This really is gralloc.default, which we'll use if we detect
195 * that the emulator we're running in does not support GPU emulation.
196 */
197 static gralloc_module_t* sFallback;
198 static pthread_once_t sFallbackOnce = PTHREAD_ONCE_INIT;
199
200 static void fallback_init(void); // forward
201
202 //
203 // Our gralloc device structure (alloc interface)
204 //
205 struct gralloc_device_t {
206 alloc_device_t device;
207 std::set<buffer_handle_t> allocated;
208 pthread_mutex_t lock;
209 };
210
211 struct gralloc_memregions_t {
212 typedef std::map<void*, uint32_t> MemRegionMap; // base -> refCount
213 typedef MemRegionMap::const_iterator mem_region_handle_t;
214
215 gralloc_memregions_t() {
216 pthread_mutex_init(&lock, NULL);
217 }
218
219 MemRegionMap ashmemRegions;
220 pthread_mutex_t lock;
221 };
222
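// Shared DMA staging region used for rcUpdateColorBufferDMA uploads. It is
// reference-counted per registered buffer, grown on demand in
// gralloc_dmaregion_register_ashmem(), and shrunk back to the initial size
// when the last reference is dropped; bigbufCount tracks buffers that are
// too large for DMA and therefore force the non-DMA fallback path.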
223 #define INITIAL_DMA_REGION_SIZE 4096
224 struct gralloc_dmaregion_t {
225 gralloc_dmaregion_t(ExtendedRCEncoderContext *rcEnc)
226 : host_memory_allocator(
227 rcEnc->featureInfo_const()->hasSharedSlotsHostMemoryAllocator),
228 sz(INITIAL_DMA_REGION_SIZE),
229 refcount(0),
230 bigbufCount(0) {
231 pthread_mutex_init(&lock, NULL);
232
233 if (rcEnc->hasDirectMem()) {
234 host_memory_allocator.hostMalloc(&address_space_block, sz);
235 }
236 }
237
238 GoldfishAddressSpaceHostMemoryAllocator host_memory_allocator;
239 GoldfishAddressSpaceBlock address_space_block;
240 uint32_t sz;
241 uint32_t refcount;
242 pthread_mutex_t lock;
243 uint32_t bigbufCount;
244 };
245
246 // global device instance
247 static gralloc_memregions_t* s_memregions = NULL;
248 static gralloc_dmaregion_t* s_grdma = NULL;
249
250 static gralloc_memregions_t* init_gralloc_memregions() {
251 if (!s_memregions) {
252 s_memregions = new gralloc_memregions_t;
253 }
254 return s_memregions;
255 }
256
257 static bool has_DMA_support(const ExtendedRCEncoderContext *rcEnc) {
258 return rcEnc->getDmaVersion() > 0 || rcEnc->hasDirectMem();
259 }
260
261 static gralloc_dmaregion_t* init_gralloc_dmaregion(ExtendedRCEncoderContext *rcEnc) {
262 D("%s: call\n", __func__);
263 if (!s_grdma) {
264 s_grdma = new gralloc_dmaregion_t(rcEnc);
265 }
266 return s_grdma;
267 }
268
269 static void get_gralloc_region(ExtendedRCEncoderContext *rcEnc) {
270 gralloc_dmaregion_t* grdma = init_gralloc_dmaregion(rcEnc);
271
272 pthread_mutex_lock(&grdma->lock);
273 grdma->refcount++;
274 D("%s: call. refcount: %u\n", __func__, grdma->refcount);
275 pthread_mutex_unlock(&grdma->lock);
276 }
277
278 static void resize_gralloc_dmaregion_locked(gralloc_dmaregion_t* grdma, uint32_t new_sz) {
279 grdma->sz = new_sz;
280 }
281
282 // max dma size: 2x 4K rgba8888
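// (2 * 3840 * 2160 pixels * 4 bytes per pixel = 66355200 bytes)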
283 #define MAX_DMA_SIZE 66355200
284
285 static bool put_gralloc_region_direct_mem_locked(gralloc_dmaregion_t* grdma, uint32_t /* sz, unused */) {
286 const bool shouldDelete = !grdma->refcount;
287 if (shouldDelete) {
288 grdma->host_memory_allocator.hostFree(&grdma->address_space_block);
289 }
290
291 return shouldDelete;
292 }
293
294 static bool put_gralloc_region_dma_locked(gralloc_dmaregion_t* grdma, uint32_t sz) {
295 D("%s: call. refcount before: %u\n", __func__, grdma->refcount);
296 grdma->refcount--;
297 if (sz > MAX_DMA_SIZE && grdma->bigbufCount) {
298 grdma->bigbufCount--;
299 }
300 bool shouldDelete = !grdma->refcount;
301 if (shouldDelete) {
302 D("%s: should delete!\n", __func__);
303 resize_gralloc_dmaregion_locked(grdma, INITIAL_DMA_REGION_SIZE);
304 D("%s: done\n", __func__);
305 }
306 D("%s: exit\n", __func__);
307 return shouldDelete;
308 }
309
310 static bool put_gralloc_region(ExtendedRCEncoderContext *rcEnc, uint32_t sz) {
311 bool shouldDelete;
312
313 gralloc_dmaregion_t* grdma = init_gralloc_dmaregion(rcEnc);
314 pthread_mutex_lock(&grdma->lock);
315 if (rcEnc->hasDirectMem()) {
316 shouldDelete = put_gralloc_region_direct_mem_locked(grdma, sz);
317 } else if (rcEnc->getDmaVersion() > 0) {
318 shouldDelete = put_gralloc_region_dma_locked(grdma, sz);
319 } else {
320 shouldDelete = false;
321 }
322 pthread_mutex_unlock(&grdma->lock);
323
324 return shouldDelete;
325 }
326
327 static void gralloc_dmaregion_register_ashmem_direct_mem_locked(gralloc_dmaregion_t* grdma, uint32_t new_sz) {
328 if (new_sz == grdma->sz) return;
329
330 GoldfishAddressSpaceHostMemoryAllocator* allocator = &grdma->host_memory_allocator;
331 GoldfishAddressSpaceBlock* block = &grdma->address_space_block;
332 allocator->hostFree(block);
333 allocator->hostMalloc(block, new_sz);
334 grdma->sz = new_sz;
335 }
336
337 static void gralloc_dmaregion_register_ashmem_dma_locked(gralloc_dmaregion_t* grdma, uint32_t new_sz) {
338 if (new_sz != grdma->sz) {
339 if (new_sz > MAX_DMA_SIZE) {
340 D("%s: requested sz %u too large (limit %u), set to fallback.",
341 __func__, new_sz, MAX_DMA_SIZE);
342 grdma->bigbufCount++;
343 } else {
344 D("%s: change sz from %u to %u", __func__, grdma->sz, new_sz);
345 resize_gralloc_dmaregion_locked(grdma, new_sz);
346 }
347 }
348 }
349
350 static void gralloc_dmaregion_register_ashmem(ExtendedRCEncoderContext *rcEnc, uint32_t sz) {
351 gralloc_dmaregion_t* grdma = init_gralloc_dmaregion(rcEnc);
352
353 pthread_mutex_lock(&grdma->lock);
354 D("%s: for sz %u, refcount %u", __func__, sz, grdma->refcount);
355 const uint32_t new_sz = std::max(grdma->sz, sz);
356
357 if (rcEnc->hasDirectMem()) {
358 gralloc_dmaregion_register_ashmem_direct_mem_locked(grdma, new_sz);
359 } else if (rcEnc->getDmaVersion() > 0) {
360 gralloc_dmaregion_register_ashmem_dma_locked(grdma, new_sz);
361 } else {
362 ALOGE("%s: unexpected DMA type", __func__);
363 }
364
365 pthread_mutex_unlock(&grdma->lock);
366 }
367
368 static void get_mem_region(void* ashmemBase) {
369 D("%s: call for %p", __func__, ashmemBase);
370
371 gralloc_memregions_t* memregions = init_gralloc_memregions();
372
373 pthread_mutex_lock(&memregions->lock);
374 ++memregions->ashmemRegions[ashmemBase];
375 pthread_mutex_unlock(&memregions->lock);
376 }
377
378 static bool put_mem_region(ExtendedRCEncoderContext *, void* ashmemBase) {
379 D("%s: call for %p", __func__, ashmemBase);
380
381 gralloc_memregions_t* memregions = init_gralloc_memregions();
382 bool shouldRemove;
383
384 pthread_mutex_lock(&memregions->lock);
385 gralloc_memregions_t::MemRegionMap::iterator i = memregions->ashmemRegions.find(ashmemBase);
386 if (i == memregions->ashmemRegions.end()) {
387 shouldRemove = true;
388 ALOGE("%s: error: tried to put a nonexistent mem region (%p)!", __func__, ashmemBase);
389 } else {
390 shouldRemove = --i->second == 0;
391 if (shouldRemove) {
392 memregions->ashmemRegions.erase(i);
393 }
394 }
395 pthread_mutex_unlock(&memregions->lock);
396
397 return shouldRemove;
398 }
399
400 #if DEBUG
401 static void dump_regions(ExtendedRCEncoderContext *) {
402 gralloc_memregions_t* memregions = init_gralloc_memregions();
403 gralloc_memregions_t::mem_region_handle_t curr = memregions->ashmemRegions.begin();
404 std::stringstream res;
405 for (; curr != memregions->ashmemRegions.end(); ++curr) {
406 res << "\tashmem base " << curr->first << " refcount " << curr->second << "\n";
407 }
408 ALOGD("ashmem region dump [\n%s]", res.str().c_str());
409 }
410 #endif
411
412 static void get_ashmem_region(ExtendedRCEncoderContext *rcEnc, cb_handle_old_t *cb) {
413 #if DEBUG
414 dump_regions(rcEnc);
415 #endif
416
417 get_mem_region(cb->getBufferPtr());
418
419 #if DEBUG
420 dump_regions(rcEnc);
421 #endif
422
423 get_gralloc_region(rcEnc);
424 }
425
426 static bool put_ashmem_region(ExtendedRCEncoderContext *rcEnc, cb_handle_old_t *cb) {
427 #if DEBUG
428 dump_regions(rcEnc);
429 #endif
430
431 const bool should_unmap = put_mem_region(rcEnc, cb->getBufferPtr());
432
433 #if DEBUG
434 dump_regions(rcEnc);
435 #endif
436
437 put_gralloc_region(rcEnc, cb->bufferSize);
438
439 return should_unmap;
440 }
441
442 static int map_buffer(cb_handle_old_t *cb, void **vaddr)
443 {
444 const int bufferFd = cb->fds[kBufferFdIndex];
445 if (bufferFd < 0) {
446 return -EINVAL;
447 }
448
449 void *addr = mmap(0, cb->bufferSize, PROT_READ | PROT_WRITE,
450 MAP_SHARED, bufferFd, 0);
451 if (addr == MAP_FAILED) {
452 ALOGE("%s: failed to map ashmem region!", __FUNCTION__);
453 return -errno;
454 }
455
456 cb->setBufferPtr(addr);
457 cb->ashmemBasePid = getpid();
458 D("%s: %p mapped ashmem base %p size %d\n", __FUNCTION__,
459 cb, addr, cb->bufferSize);
460
461 *vaddr = addr;
462 return 0;
463 }
464
465 static HostConnection* sHostCon = NULL;
466
467 static HostConnection* createOrGetHostConnection() {
468 if (!sHostCon) {
469 sHostCon = HostConnection::createUnique().release();
470 }
471 return sHostCon;
472 }
473
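// Helper macros: both declare local `hostCon` and `rcEnc` variables in the
// calling function. DEFINE_AND_VALIDATE_HOST_CONNECTION additionally makes
// the caller return -EIO when either cannot be obtained, so it may only be
// used inside functions returning int. Illustrative use (sketch only):
//
//     static int some_gralloc_entry_point(...) {
//         DEFINE_AND_VALIDATE_HOST_CONNECTION;
//         hostCon->lock();
//         rcEnc->rcSomeRenderControlCall(rcEnc, ...);  // placeholder call
//         hostCon->unlock();
//         return 0;
//     }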
474 #define DEFINE_HOST_CONNECTION \
475 HostConnection *hostCon = createOrGetHostConnection(); \
476 ExtendedRCEncoderContext *rcEnc = (hostCon ? hostCon->rcEncoder() : NULL); \
477 bool hasVulkan = rcEnc->featureInfo_const()->hasVulkan; (void)hasVulkan; \
478
479 #define DEFINE_AND_VALIDATE_HOST_CONNECTION \
480 HostConnection *hostCon = createOrGetHostConnection(); \
481 if (!hostCon) { \
482 ALOGE("gralloc: Failed to get host connection\n"); \
483 return -EIO; \
484 } \
485 ExtendedRCEncoderContext *rcEnc = hostCon->rcEncoder(); \
486 if (!rcEnc) { \
487 ALOGE("gralloc: Failed to get renderControl encoder context\n"); \
488 return -EIO; \
489 } \
490 bool hasVulkan = rcEnc->featureInfo_const()->hasVulkan; (void)hasVulkan;\
491
492 #if PLATFORM_SDK_VERSION < 18
493 // On older APIs, just define it as a value no one is going to use.
494 #define HAL_PIXEL_FORMAT_YCbCr_420_888 0xFFFFFFFF
495 #endif
496
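// Pushes guest pixel data to the host color buffer. When DMA is available
// (and no oversized buffers are forcing the fallback path) the data is sent
// through the shared DMA region via rcUpdateColorBufferDMA; otherwise YUV
// formats are first converted to RGB in a temporary buffer and uploaded with
// rcUpdateColorBuffer.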
497 static void updateHostColorBuffer(cb_handle_old_t* cb,
498 bool doLocked,
499 char* pixels) {
500 D("%s: call. doLocked=%d", __FUNCTION__, doLocked);
501
502 DEFINE_HOST_CONNECTION;
503 gralloc_dmaregion_t* grdma = init_gralloc_dmaregion(rcEnc);
504
505 int bpp = glUtilsPixelBitSize(cb->glFormat, cb->glType) >> 3;
506 int left = doLocked ? cb->lockedLeft : 0;
507 int top = doLocked ? cb->lockedTop : 0;
508 int width = doLocked ? cb->lockedWidth : cb->width;
509 int height = doLocked ? cb->lockedHeight : cb->height;
510
511 char* to_send = pixels;
512 uint32_t rgbSz = width * height * bpp;
513 uint32_t send_buffer_size = rgbSz;
514 bool is_rgb_format =
515 cb->format != HAL_PIXEL_FORMAT_YV12 &&
516 cb->format != HAL_PIXEL_FORMAT_YCbCr_420_888;
517
518 std::vector<char> convertedBuf;
519
520 if (doLocked && is_rgb_format) {
521 convertedBuf.resize(rgbSz);
522 to_send = &convertedBuf.front();
523 copy_rgb_buffer_from_unlocked(
524 to_send, pixels,
525 cb->width,
526 width, height, top, left, bpp);
527 }
528
529 const bool hasDMA = has_DMA_support(rcEnc);
530 if (hasDMA && grdma->bigbufCount) {
531 D("%s: there are big buffers alive, use fallback (count %u)", __FUNCTION__,
532 grdma->bigbufCount);
533 }
534
535 if (hasDMA && !grdma->bigbufCount) {
536 switch (cb->format) {
537 case HAL_PIXEL_FORMAT_YV12:
538 get_yv12_offsets(width, height, NULL, NULL, &send_buffer_size);
539 break;
540
541 case HAL_PIXEL_FORMAT_YCbCr_420_888:
542 get_yuv420p_offsets(width, height, NULL, NULL, &send_buffer_size);
543 break;
544 }
545
546 if (grdma->address_space_block.guestPtr()) {
547 rcEnc->bindDmaDirectly(grdma->address_space_block.guestPtr(),
548 grdma->address_space_block.physAddr());
549 } else {
550 ALOGE("%s: Unexpected DMA", __func__);
551 }
552
553 D("%s: call. dma update with sz=%u", __func__, send_buffer_size);
554 pthread_mutex_lock(&grdma->lock);
555 rcEnc->rcUpdateColorBufferDMA(rcEnc, cb->hostHandle,
556 left, top, width, height,
557 cb->glFormat, cb->glType,
558 to_send, send_buffer_size);
559 pthread_mutex_unlock(&grdma->lock);
560 } else {
561 switch (cb->format) {
562 case HAL_PIXEL_FORMAT_YV12:
563 convertedBuf.resize(rgbSz);
564 to_send = &convertedBuf.front();
565 D("convert yv12 to rgb888 here");
566 yv12_to_rgb888(to_send, pixels,
567 width, height, left, top,
568 left + width - 1, top + height - 1);
569 break;
570
571 case HAL_PIXEL_FORMAT_YCbCr_420_888:
572 convertedBuf.resize(rgbSz);
573 to_send = &convertedBuf.front();
574 yuv420p_to_rgb888(to_send, pixels,
575 width, height, left, top,
576 left + width - 1, top + height - 1);
577 break;
578 }
579
580 rcEnc->rcUpdateColorBuffer(rcEnc, cb->hostHandle,
581 left, top, width, height,
582 cb->glFormat, cb->glType, to_send);
583 }
584 }
585
586 //
587 // gralloc device functions (alloc interface)
588 //
589 static void gralloc_dump(struct alloc_device_t* /*dev*/, char* /*buff*/, int /*buff_len*/) {}
590
591 static int gralloc_get_buffer_format(const int frameworkFormat, const int usage) {
592 // Pick the right concrete pixel format given the endpoints as encoded in
593 // the usage bits. Every end-point pair needs explicit listing here.
594 #if PLATFORM_SDK_VERSION >= 17
595 if (frameworkFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
596 // Camera as producer
597 if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
598 if (usage & GRALLOC_USAGE_HW_TEXTURE) {
599 // Camera-to-display is RGBA
600 return HAL_PIXEL_FORMAT_RGBA_8888;
601 } else if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
602 // Camera-to-encoder is NV21
603 return HAL_PIXEL_FORMAT_YCrCb_420_SP;
604 }
605 }
606
607 ALOGE("gralloc_alloc: Requested auto format selection, "
608 "but no known format for this usage=%x", usage);
609 return -EINVAL;
610 } else if (frameworkFormat == HAL_PIXEL_FORMAT_YCbCr_420_888) {
611 ALOGW("gralloc_alloc: Requested YCbCr_420_888, taking experimental path. "
612 "usage=%x", usage);
613 } else if (frameworkFormat == OMX_COLOR_FormatYUV420Planar &&
614 (usage & GOLDFISH_GRALLOC_USAGE_GPU_DATA_BUFFER)) {
615 ALOGW("gralloc_alloc: Requested OMX_COLOR_FormatYUV420Planar, given "
616 "YCbCr_420_888, taking experimental path. "
617 "usage=%x", usage);
618 return HAL_PIXEL_FORMAT_YCbCr_420_888;
619 }
620 #endif // PLATFORM_SDK_VERSION >= 17
621
622 return frameworkFormat;
623 }
624
625 static int gralloc_alloc(alloc_device_t* dev,
626 int w, int h, const int frameworkFormat, int usage,
627 buffer_handle_t* pHandle, int* pStride)
628 {
629 D("gralloc_alloc w=%d h=%d usage=0x%x frameworkFormat=0x%x\n", w, h, usage, frameworkFormat);
630
631 gralloc_device_t *grdev = (gralloc_device_t *)dev;
632 if (!grdev || !pHandle || !pStride) {
633 ALOGE("gralloc_alloc: Bad inputs (grdev: %p, pHandle: %p, pStride: %p)",
634 grdev, pHandle, pStride);
635 return -EINVAL;
636 }
637
638 const int format = gralloc_get_buffer_format(frameworkFormat, usage);
639 if (format < 0) {
640 return format;
641 }
642
643 //
644 // Note: in screen capture mode, both sw_write and hw_write will be on
645 // and this is a valid usage
646 //
647 bool sw_write = (0 != (usage & GRALLOC_USAGE_SW_WRITE_MASK));
648 bool hw_write = (usage & GRALLOC_USAGE_HW_RENDER); (void)hw_write;
649 bool sw_read = (0 != (usage & GRALLOC_USAGE_SW_READ_MASK));
650 const bool hw_texture = usage & GRALLOC_USAGE_HW_TEXTURE;
651 const bool hw_render = usage & GRALLOC_USAGE_HW_RENDER;
652 const bool hw_2d = usage & GRALLOC_USAGE_HW_2D;
653 const bool hw_composer = usage & GRALLOC_USAGE_HW_COMPOSER;
654 const bool hw_fb = usage & GRALLOC_USAGE_HW_FB;
655 const bool rgb888_unsupported_usage =
656 hw_texture || hw_render || hw_2d || hw_composer || hw_fb;
657 #if PLATFORM_SDK_VERSION >= 17
658 bool hw_cam_write = (usage & GRALLOC_USAGE_HW_CAMERA_WRITE);
659 bool hw_cam_read = (usage & GRALLOC_USAGE_HW_CAMERA_READ);
660 #else // PLATFORM_SDK_VERSION
661 bool hw_cam_write = false;
662 bool hw_cam_read = false;
663 #endif // PLATFORM_SDK_VERSION
664 #if PLATFORM_SDK_VERSION >= 15
665 bool hw_vid_enc_read = usage & GRALLOC_USAGE_HW_VIDEO_ENCODER;
666 #else // PLATFORM_SDK_VERSION
667 bool hw_vid_enc_read = false;
668 #endif // PLATFORM_SDK_VERSION
669
670 bool yuv_format = false;
671 bool raw_format = false;
672 int ashmem_size = 0;
673 int stride = w;
674
675 GLenum glFormat = 0;
676 GLenum glType = 0;
677 EmulatorFrameworkFormat selectedEmuFrameworkFormat = FRAMEWORK_FORMAT_GL_COMPATIBLE;
678
679 int bpp = 0;
680 int align = 1;
681 switch (format) {
682 case HAL_PIXEL_FORMAT_RGBA_8888:
683 case HAL_PIXEL_FORMAT_RGBX_8888:
684 case HAL_PIXEL_FORMAT_BGRA_8888:
685 bpp = 4;
686 glFormat = GL_RGBA;
687 glType = GL_UNSIGNED_BYTE;
688 break;
689 case HAL_PIXEL_FORMAT_RGB_888:
690 if (rgb888_unsupported_usage) {
691 return -EINVAL; // we don't support RGB_888 for HW usage
692 } else {
693 bpp = 3;
694 glFormat = GL_RGB;
695 glType = GL_UNSIGNED_BYTE;
696 break;
697 }
698 case HAL_PIXEL_FORMAT_RGB_565:
699 bpp = 2;
700 // Workaround: distinguish this format from RGB8/RGBA8
701 // by setting |glFormat| to GL_RGB565
702 // (previously, it was left as GL_RGB)
703 glFormat = GL_RGB565;
704 glType = GL_UNSIGNED_SHORT_5_6_5;
705 break;
706 #if PLATFORM_SDK_VERSION >= 26
707 case HAL_PIXEL_FORMAT_RGBA_FP16:
708 bpp = 8;
709 glFormat = GL_RGBA16F;
710 glType = GL_HALF_FLOAT;
711 break;
712 case HAL_PIXEL_FORMAT_RGBA_1010102:
713 bpp = 4;
714 glFormat = GL_RGB10_A2;
715 glType = GL_UNSIGNED_INT_2_10_10_10_REV;
716 break;
717 #endif // PLATFORM_SDK_VERSION >= 26
718 #if PLATFORM_SDK_VERSION >= 21
719 case HAL_PIXEL_FORMAT_RAW16:
720 case HAL_PIXEL_FORMAT_Y16:
721 #elif PLATFORM_SDK_VERSION >= 16
722 case HAL_PIXEL_FORMAT_RAW_SENSOR:
723 #endif
724 bpp = 2;
725 align = 16*bpp;
726 if (! ((sw_read || hw_cam_read) && (sw_write || hw_cam_write) ) ) {
727 // Raw sensor data or Y16 only goes between camera and CPU
728 return -EINVAL;
729 }
730 // Not expecting to actually create any GL surfaces for this
731 glFormat = GL_LUMINANCE;
732 glType = GL_UNSIGNED_SHORT;
733 raw_format = true;
734 break;
735 #if PLATFORM_SDK_VERSION >= 17
736 case HAL_PIXEL_FORMAT_BLOB:
737 bpp = 1;
738 if (! (sw_read) ) {
739 // Blob data cannot be used by HW other than camera emulator
740 // But there is a CTS test trying to have access to it
741 // BUG: https://buganizer.corp.google.com/issues/37719518
742 return -EINVAL;
743 }
744 // Not expecting to actually create any GL surfaces for this
745 glFormat = GL_LUMINANCE;
746 glType = GL_UNSIGNED_BYTE;
747 break;
748 #endif // PLATFORM_SDK_VERSION >= 17
749 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
750 align = 1;
751 bpp = 1; // per-channel bpp
752 yuv_format = true;
753 // Not expecting to actually create any GL surfaces for this
754 break;
755 case HAL_PIXEL_FORMAT_YV12:
756 align = 16;
757 bpp = 1; // per-channel bpp
758 yuv_format = true;
759 // We are going to use RGB8888 on the host for Vulkan
760 glFormat = GL_RGBA;
761 glType = GL_UNSIGNED_BYTE;
762 selectedEmuFrameworkFormat = FRAMEWORK_FORMAT_YV12;
763 break;
764 case HAL_PIXEL_FORMAT_YCbCr_420_888:
765 align = 1;
766 bpp = 1; // per-channel bpp
767 yuv_format = true;
768 // We are going to use RGB888 on the host
769 glFormat = GL_RGB;
770 glType = GL_UNSIGNED_BYTE;
771 selectedEmuFrameworkFormat = FRAMEWORK_FORMAT_YUV_420_888;
772 break;
773 default:
774 ALOGE("gralloc_alloc: Unknown format %d", format);
775 return -EINVAL;
776 }
777
778 //
779 // Allocate ColorBuffer handle on the host (only if h/w access is allowed)
780 // Only do this for some h/w usages, not all.
781 // Also do this if we need to read from the surface; in that case the
782 // rendering still happens on the host, but we also need to be able to
783 // read back from the color buffer, which requires a host color buffer
784 //
785 DEFINE_AND_VALIDATE_HOST_CONNECTION;
786 #if PLATFORM_SDK_VERSION >= 17
787 bool needHostCb = ((!yuv_format && frameworkFormat != HAL_PIXEL_FORMAT_BLOB) ||
788 usage & GOLDFISH_GRALLOC_USAGE_GPU_DATA_BUFFER ||
789 #else
790 bool needHostCb = (!yuv_format ||
791 #endif // !(PLATFORM_SDK_VERSION >= 17)
792 frameworkFormat == HAL_PIXEL_FORMAT_YV12 ||
793 frameworkFormat == HAL_PIXEL_FORMAT_YCbCr_420_888) &&
794 !raw_format &&
795 #if PLATFORM_SDK_VERSION >= 15
796 (usage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
797 GRALLOC_USAGE_HW_2D | GRALLOC_USAGE_HW_COMPOSER |
798 GRALLOC_USAGE_HW_VIDEO_ENCODER |
799 GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_SW_READ_MASK))
800 #else // PLATFORM_SDK_VERSION
801 (usage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
802 GRALLOC_USAGE_HW_2D |
803 GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_SW_READ_MASK))
804 #endif // PLATFORM_SDK_VERSION
805 ;
806
807 if (isHidlGralloc) {
808 if (needHostCb || (usage & GRALLOC_USAGE_HW_FB)) {
809 // keep space for postCounter
810 // AND openCounter for all host cb
811 ashmem_size += GOLDFISH_OFFSET_UNIT * 2;
812 }
813 } else {
814 if (usage & GRALLOC_USAGE_HW_FB) {
815 // keep space for postCounter
816 ashmem_size += GOLDFISH_OFFSET_UNIT * 1;
817 }
818 }
819
820 // API 26 always expects at least one file descriptor to be associated
821 // with each color buffer
822 // BUG: 37719038
823 if (PLATFORM_SDK_VERSION >= 26 ||
824 sw_read || sw_write || hw_cam_write || hw_vid_enc_read) {
825 // keep space for image on guest memory if SW access is needed
826 // or if the camera is doing writing
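// Illustrative example: a 640x480 YV12 buffer (bpp=1, align=16) gets
// yStride=640 and uvStride=320, so the pixel data adds
// 640*480 + 2*(240*320) = 460800 bytes before page rounding, with stride=640.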
827 if (yuv_format) {
828 size_t yStride = (w*bpp + (align - 1)) & ~(align-1);
829 size_t uvStride = (yStride / 2 + (align - 1)) & ~(align-1);
830 size_t uvHeight = h / 2;
831 ashmem_size += yStride * h + 2 * (uvHeight * uvStride);
832 stride = yStride / bpp;
833 } else {
834 size_t bpr = (w*bpp + (align-1)) & ~(align-1);
835 ashmem_size += (bpr * h);
836 stride = bpr / bpp;
837 }
838 }
839
840 D("gralloc_alloc format=%d, ashmem_size=%d, stride=%d, tid %lu\n", format, ashmem_size, stride,
841 getCurrentThreadId());
842
843 //
844 // Allocate space in ashmem if needed
845 //
846 int fd = -1;
847 if (ashmem_size > 0) {
848 // round up to the page size
849 ashmem_size = (ashmem_size + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
850
851 ALOGD("%s: Creating ashmem region of size %d\n", __FUNCTION__, ashmem_size);
852 fd = ashmem_create_region("gralloc-buffer", ashmem_size);
853 if (fd < 0) {
854 ALOGE("gralloc_alloc failed to create ashmem region: %s\n",
855 strerror(errno));
856 return -errno;
857 }
858 }
859
860 cb_handle_old_t *cb = new cb_handle_old_t(fd, ashmem_size, usage,
861 w, h, format,
862 glFormat, glType);
863
864 if (ashmem_size > 0) {
865 //
866 // map the ashmem region if it exists
867 //
868 void *vaddr;
869 int err = map_buffer(cb, &vaddr);
870 if (err) {
871 close(fd);
872 delete cb;
873 return err;
874 }
875 }
876
877 const bool hasDMA = has_DMA_support(rcEnc);
878
879 if (needHostCb) {
880 if (hostCon && rcEnc) {
881 GLenum allocFormat = glFormat;
882 // The handling of RGBX_8888 is very subtle. Most of the time
883 // we want it to be treated as RGBA_8888, with the exception
884 // that alpha is always ignored and treated as 1. The solution
885 // is to create 3 channel RGB texture instead and host GL will
886 // handle the Alpha channel.
887 if (HAL_PIXEL_FORMAT_RGBX_8888 == format) {
888 allocFormat = GL_RGB;
889 }
890
891 hostCon->lock();
892 if (hasDMA) {
893 cb->hostHandle = rcEnc->rcCreateColorBufferDMA(rcEnc, w, h, allocFormat, selectedEmuFrameworkFormat);
894 } else {
895 cb->hostHandle = rcEnc->rcCreateColorBuffer(rcEnc, w, h, allocFormat);
896 }
897 hostCon->unlock();
898 }
899
900 if (!cb->hostHandle) {
901 // Could not create colorbuffer on host !!!
902 close(fd);
903 delete cb;
904 ALOGE("%s: failed to create host cb! -EIO", __FUNCTION__);
905 return -EIO;
906 } else {
907 QEMU_PIPE_HANDLE refcountPipeFd = qemu_pipe_open("refcount");
908 if(qemu_pipe_valid(refcountPipeFd)) {
909 cb->setRefcountPipeFd(refcountPipeFd);
910 qemu_pipe_write(refcountPipeFd, &cb->hostHandle, 4);
911 }
912 D("Created host ColorBuffer 0x%x\n", cb->hostHandle);
913 }
914
915 if (isHidlGralloc) { *getOpenCountPtr(cb) = 0; }
916 }
917
918 //
919 // alloc succeeded - insert the allocated handle to the allocated list
920 //
921 pthread_mutex_lock(&grdev->lock);
922 grdev->allocated.insert(cb);
923 pthread_mutex_unlock(&grdev->lock);
924
925 *pHandle = cb;
926 D("%s: alloc succeeded, new ashmem base and size: %p %d handle: %p",
927 __FUNCTION__, cb->getBufferPtr(), cb->bufferSize, cb);
928 switch (frameworkFormat) {
929 case HAL_PIXEL_FORMAT_YCbCr_420_888:
930 *pStride = 0;
931 break;
932 default:
933 *pStride = stride;
934 break;
935 }
936
937 hostCon->lock();
938 if (hasDMA) {
939 get_gralloc_region(rcEnc); // map_buffer(cb, ...) refers here
940 }
941 hostCon->unlock();
942
943 return 0;
944 }
945
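// Releases a buffer created by gralloc_alloc: closes the host color buffer
// (unless a refcount pipe manages its lifetime), unmaps and closes the
// ashmem region, closes the refcount pipe if present, and removes the handle
// from the device's allocated set.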
946 static int gralloc_free(alloc_device_t* dev,
947 buffer_handle_t handle)
948 {
949 DEFINE_AND_VALIDATE_HOST_CONNECTION;
950
951 const cb_handle_old_t *cb = cb_handle_old_t::from(handle);
952 if (!cb) {
953 ERR("gralloc_free: invalid handle %p", handle);
954 return -EINVAL;
955 }
956
957 D("%s: for buf %p ptr %p size %d\n",
958 __FUNCTION__, handle, cb->getBufferPtr(), cb->bufferSize);
959
960 if (cb->hostHandle && !cb->hasRefcountPipe()) {
961 int32_t openCount = 1;
962 int32_t* openCountPtr = &openCount;
963
964 if (isHidlGralloc && cb->getBufferPtr()) {
965 openCountPtr = getOpenCountPtr(cb);
966 }
967
968 if (*openCountPtr > 0) {
969 D("Closing host ColorBuffer 0x%x\n", cb->hostHandle);
970 hostCon->lock();
971 rcEnc->rcCloseColorBuffer(rcEnc, cb->hostHandle);
972 hostCon->unlock();
973 } else {
974 D("A rcCloseColorBuffer is owed!!! sdk ver: %d", PLATFORM_SDK_VERSION);
975 *openCountPtr = -1;
976 }
977 }
978
979 //
980 // detach and unmap ashmem area if present
981 //
982 const int bufferFd = cb->fds[kBufferFdIndex];
983 if (bufferFd > 0) {
984 if (cb->bufferSize > 0 && cb->getBufferPtr()) {
985 D("%s: unmapped %p", __FUNCTION__, cb->getBufferPtr());
986 munmap(cb->getBufferPtr(), cb->bufferSize);
987 put_gralloc_region(rcEnc, cb->bufferSize);
988 }
989 close(bufferFd);
990 }
991
992 if(qemu_pipe_valid(cb->fds[kHostHandleRefCountIndex])) {
993 qemu_pipe_close(cb->fds[kHostHandleRefCountIndex]);
994 }
995 D("%s: done", __FUNCTION__);
996 // remove it from the allocated list
997 gralloc_device_t *grdev = (gralloc_device_t *)dev;
998
999 pthread_mutex_lock(&grdev->lock);
1000 grdev->allocated.erase(cb);
1001 pthread_mutex_unlock(&grdev->lock);
1002
1003 delete cb;
1004
1005 D("%s: exit", __FUNCTION__);
1006 return 0;
1007 }
1008
1009 static int gralloc_device_close(struct hw_device_t *dev)
1010 {
1011 gralloc_device_t* d = reinterpret_cast<gralloc_device_t*>(dev);
1012 if (d) {
1013 for (std::set<buffer_handle_t>::const_iterator i = d->allocated.begin();
1014 i != d->allocated.end(); ++i) {
1015 gralloc_free(&d->device, *i);
1016 }
1017
1018 delete d;
1019
1020 if (sHostCon) {
1021 delete sHostCon;
1022 sHostCon = nullptr;
1023 }
1024 }
1025 return 0;
1026 }
1027
1028 //
1029 // gralloc module functions - refcount + locking interface
1030 //
1031 static int gralloc_register_buffer(gralloc_module_t const* module,
1032 buffer_handle_t handle)
1033 {
1034 DEFINE_AND_VALIDATE_HOST_CONNECTION;
1035
1036 D("%s: start", __FUNCTION__);
1037 pthread_once(&sFallbackOnce, fallback_init);
1038 if (sFallback != NULL) {
1039 return sFallback->registerBuffer(sFallback, handle);
1040 }
1041
1042 private_module_t *gr = (private_module_t *)module;
1043 if (!gr) {
1044 return -EINVAL;
1045 }
1046
1047 cb_handle_old_t *cb = cb_handle_old_t::from_unconst(handle);
1048 if (!cb) {
1049 ERR("gralloc_register_buffer(%p): invalid buffer", cb);
1050 return -EINVAL;
1051 }
1052
1053 D("gralloc_register_buffer(%p) w %d h %d format 0x%x",
1054 handle, cb->width, cb->height, cb->format);
1055
1056 if (cb->hostHandle != 0 && !cb->hasRefcountPipe()) {
1057 D("Opening host ColorBuffer 0x%x\n", cb->hostHandle);
1058 hostCon->lock();
1059 rcEnc->rcOpenColorBuffer2(rcEnc, cb->hostHandle);
1060 hostCon->unlock();
1061 }
1062
1063 //
1064 // if the color buffer has an ashmem region and it is not mapped in this
1065 // process, map it now.
1066 //
1067 if (cb->bufferSize > 0 && cb->mappedPid != getpid()) {
1068 void *vaddr;
1069 int err = map_buffer(cb, &vaddr);
1070 if (err) {
1071 ERR("gralloc_register_buffer(%p): map failed: %s", cb, strerror(-err));
1072 return -err;
1073 }
1074 cb->mappedPid = getpid();
1075
1076 if (isHidlGralloc) {
1077 int32_t* openCountPtr = getOpenCountPtr(cb);
1078 if (!*openCountPtr) *openCountPtr = 1;
1079 }
1080 }
1081
1082 if (cb->bufferSize > 0) {
1083 get_ashmem_region(rcEnc, cb);
1084 }
1085
1086 return 0;
1087 }
1088
1089 static int gralloc_unregister_buffer(gralloc_module_t const* module,
1090 buffer_handle_t handle)
1091 {
1092 DEFINE_AND_VALIDATE_HOST_CONNECTION;
1093
1094 if (sFallback != NULL) {
1095 return sFallback->unregisterBuffer(sFallback, handle);
1096 }
1097
1098 private_module_t *gr = (private_module_t *)module;
1099 if (!gr) {
1100 return -EINVAL;
1101 }
1102
1103 cb_handle_old_t *cb = cb_handle_old_t::from_unconst(handle);
1104 if (!cb) {
1105 ERR("gralloc_unregister_buffer(%p): invalid buffer", cb);
1106 return -EINVAL;
1107 }
1108
1109
1110 if (cb->hostHandle && !cb->hasRefcountPipe()) {
1111 D("Closing host ColorBuffer 0x%x\n", cb->hostHandle);
1112 hostCon->lock();
1113 rcEnc->rcCloseColorBuffer(rcEnc, cb->hostHandle);
1114
1115 if (isHidlGralloc) {
1116 // Queue up another rcCloseColorBuffer if applicable.
1117 // invariant: have ashmem.
1118 if (cb->bufferSize > 0 && cb->mappedPid == getpid()) {
1119 int32_t* openCountPtr = getOpenCountPtr(cb);
1120 if (*openCountPtr == -1) {
1121 D("%s: revenge of the rcCloseColorBuffer!", __func__);
1122 rcEnc->rcCloseColorBuffer(rcEnc, cb->hostHandle);
1123 *openCountPtr = -2;
1124 }
1125 }
1126 }
1127 hostCon->unlock();
1128 }
1129
1130 //
1131 // unmap ashmem region if it was previously mapped in this process
1132 // (through register_buffer)
1133 //
1134 if (cb->bufferSize > 0 && cb->mappedPid == getpid()) {
1135 const bool should_unmap = put_ashmem_region(rcEnc, cb);
1136 if (!should_unmap) goto done;
1137
1138 int err = munmap(cb->getBufferPtr(), cb->bufferSize);
1139 if (err) {
1140 ERR("gralloc_unregister_buffer(%p): unmap failed", cb);
1141 return -EINVAL;
1142 }
1143 cb->bufferSize = 0;
1144 cb->mappedPid = 0;
1145 D("%s: Unregister buffer previously mapped to pid %d", __FUNCTION__, getpid());
1146 }
1147
1148 done:
1149 D("gralloc_unregister_buffer(%p) done\n", cb);
1150 return 0;
1151 }
1152
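// Locks a buffer for CPU access: validates the requested usage against the
// usage the buffer was allocated with, makes sure the ashmem region is
// mapped in this process, reads back the host color buffer for s/w reads
// (converting to YUV when needed), and records the locked rectangle so that
// gralloc_unlock() can flush only the written region.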
1153 static int gralloc_lock(gralloc_module_t const* module,
1154 buffer_handle_t handle, int usage,
1155 int l, int t, int w, int h,
1156 void** vaddr)
1157 {
1158 if (sFallback != NULL) {
1159 return sFallback->lock(sFallback, handle, usage, l, t, w, h, vaddr);
1160 }
1161
1162 private_module_t *gr = (private_module_t *)module;
1163 if (!gr) {
1164 return -EINVAL;
1165 }
1166
1167 cb_handle_old_t *cb = cb_handle_old_t::from_unconst(handle);
1168 if (!cb) {
1169 ALOGE("gralloc_lock bad handle\n");
1170 return -EINVAL;
1171 }
1172
1173 // Validate usage:
1174 // 1. the buffer cannot be locked for hw access,
1175 // 2. it must be locked for either sw read or write,
1176 // 3. the locked sw access must match the usage requested at alloc time.
1177 bool sw_read = (0 != (usage & GRALLOC_USAGE_SW_READ_MASK));
1178 bool sw_write = (0 != (usage & GRALLOC_USAGE_SW_WRITE_MASK));
1179 bool hw_read = (usage & GRALLOC_USAGE_HW_TEXTURE);
1180 bool hw_write = (usage & GRALLOC_USAGE_HW_RENDER);
1181 #if PLATFORM_SDK_VERSION >= 17
1182 bool hw_cam_write = (usage & GRALLOC_USAGE_HW_CAMERA_WRITE);
1183 bool hw_cam_read = (usage & GRALLOC_USAGE_HW_CAMERA_READ);
1184 #else // PLATFORM_SDK_VERSION
1185 bool hw_cam_write = false;
1186 bool hw_cam_read = false;
1187 #endif // PLATFORM_SDK_VERSION
1188
1189 #if PLATFORM_SDK_VERSION >= 15
1190 bool hw_vid_enc_read = (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER);
1191 #else // PLATFORM_SDK_VERSION
1192 bool hw_vid_enc_read = false;
1193 #endif // PLATFORM_SDK_VERSION
1194
1195 bool sw_read_allowed = (0 != (cb->usage & GRALLOC_USAGE_SW_READ_MASK));
1196
1197 #if PLATFORM_SDK_VERSION >= 15
1198 // bug: 30088791
1199 // a buffer was created for GRALLOC_USAGE_HW_VIDEO_ENCODER usage but
1200 // later a software encoder is reading this buffer: this is actually
1201 // legit usage.
1202 sw_read_allowed = sw_read_allowed || (cb->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER);
1203 #endif // PLATFORM_SDK_VERSION >= 15
1204
1205 bool sw_write_allowed = (0 != (cb->usage & GRALLOC_USAGE_SW_WRITE_MASK));
1206
1207 if ( (hw_read || hw_write) ||
1208 (!sw_read && !sw_write &&
1209 !hw_cam_write && !hw_cam_read &&
1210 !hw_vid_enc_read) ||
1211 (sw_read && !sw_read_allowed) ||
1212 (sw_write && !sw_write_allowed) ) {
1213 ALOGE("gralloc_lock usage mismatch usage=0x%x cb->usage=0x%x\n", usage,
1214 cb->usage);
1215 //This is not strictly an error, so loosen the check and continue.
1216 //bug: 30784436
1217 //return -EINVAL;
1218 }
1219
1220 void *cpu_addr = NULL;
1221
1222 //
1223 // make sure ashmem area is mapped if needed
1224 //
1225 if (cb->canBePosted() || sw_read || sw_write ||
1226 hw_cam_write || hw_cam_read ||
1227 hw_vid_enc_read) {
1228 if (cb->ashmemBasePid != getpid() || !cb->getBufferPtr()) {
1229 return -EACCES;
1230 }
1231
1232 cpu_addr = (void *)((char*)cb->getBufferPtr() + getAshmemColorOffset(cb));
1233 }
1234
1235 if (cb->hostHandle) {
1236 // Make sure we have host connection
1237 DEFINE_AND_VALIDATE_HOST_CONNECTION;
1238 hostCon->lock();
1239
1240 //
1241 // flush color buffer write cache on host and get its sync status.
1242 //
1243 int hostSyncStatus = rcEnc->rcColorBufferCacheFlush(rcEnc, cb->hostHandle,
1244 0,
1245 sw_read);
1246 if (hostSyncStatus < 0) {
1247 // host failed the color buffer sync - probably since it was already
1248 // locked for write access. fail the lock.
1249 ALOGE("gralloc_lock cacheFlush failed sw_read=%d\n", sw_read);
1250 return -EBUSY;
1251 }
1252
1253 // camera delivers bits to the buffer directly and does not require
1254 // an explicit read.
1255 if (sw_read && !(usage & GRALLOC_USAGE_HW_CAMERA_MASK)) {
1256 D("gralloc_lock read back color buffer %d %d ashmem base %p sz %d\n",
1257 cb->width, cb->height, cb->getBufferPtr(), cb->bufferSize);
1258 void* rgb_addr = cpu_addr;
1259 char* tmpBuf = 0;
1260 if (cb->format == HAL_PIXEL_FORMAT_YV12 ||
1261 cb->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
1262 if (rcEnc->hasYUVCache()) {
1263 uint32_t buffer_size;
1264 if (cb->format == HAL_PIXEL_FORMAT_YV12) {
1265 get_yv12_offsets(cb->width, cb->height, NULL, NULL,
1266 &buffer_size);
1267 } else {
1268 get_yuv420p_offsets(cb->width, cb->height, NULL, NULL,
1269 &buffer_size);
1270 }
1271 D("read YUV copy from host");
1272 rcEnc->rcReadColorBufferYUV(rcEnc, cb->hostHandle,
1273 0, 0, cb->width, cb->height,
1274 rgb_addr, buffer_size);
1275 } else {
1276 // We are using RGB888
1277 tmpBuf = new char[cb->width * cb->height * 3];
1278 rcEnc->rcReadColorBuffer(rcEnc, cb->hostHandle,
1279 0, 0, cb->width, cb->height, cb->glFormat, cb->glType, tmpBuf);
1280 if (cb->format == HAL_PIXEL_FORMAT_YV12) {
1281 D("convert rgb888 to yv12 here");
1282 rgb888_to_yv12((char*)cpu_addr, tmpBuf, cb->width, cb->height, l, t, l+w-1, t+h-1);
1283 } else if (cb->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
1284 D("convert rgb888 to yuv420p here");
1285 rgb888_to_yuv420p((char*)cpu_addr, tmpBuf, cb->width, cb->height, l, t, l+w-1, t+h-1);
1286 }
1287 delete [] tmpBuf;
1288 }
1289 } else {
1290 rcEnc->rcReadColorBuffer(rcEnc, cb->hostHandle,
1291 0, 0, cb->width, cb->height, cb->glFormat, cb->glType, rgb_addr);
1292 }
1293 }
1294
1295 if (has_DMA_support(rcEnc)) {
1296 gralloc_dmaregion_register_ashmem(rcEnc, cb->bufferSize);
1297 }
1298 hostCon->unlock();
1299 }
1300
1301 //
1302 // is virtual address required ?
1303 //
1304 if (sw_read || sw_write || hw_cam_write || hw_cam_read || hw_vid_enc_read) {
1305 *vaddr = cpu_addr;
1306 }
1307
1308 if (sw_write || hw_cam_write) {
1309 //
1310 // Keep locked region if locked for s/w write access.
1311 //
1312 cb->lockedLeft = l;
1313 cb->lockedTop = t;
1314 cb->lockedWidth = w;
1315 cb->lockedHeight = h;
1316 }
1317
1318 DD("gralloc_lock success. vaddr: %p, *vaddr: %p, usage: %x, cpu_addr: %p, base: %p",
1319 vaddr, vaddr ? *vaddr : 0, usage, cpu_addr, cb->getBufferPtr());
1320
1321 return 0;
1322 }
1323
1324 static int gralloc_unlock(gralloc_module_t const* module,
1325 buffer_handle_t handle)
1326 {
1327 if (sFallback != NULL) {
1328 return sFallback->unlock(sFallback, handle);
1329 }
1330
1331 private_module_t *gr = (private_module_t *)module;
1332 if (!gr) {
1333 return -EINVAL;
1334 }
1335
1336 cb_handle_old_t *cb = cb_handle_old_t::from_unconst(handle);
1337 if (!cb) {
1338 ALOGD("%s: invalid cb handle. -EINVAL", __FUNCTION__);
1339 return -EINVAL;
1340 }
1341
1342 //
1343 // if buffer was locked for s/w write, we need to update the host with
1344 // the updated data
1345 //
1346 if (cb->hostHandle) {
1347
1348 // Make sure we have host connection
1349 DEFINE_AND_VALIDATE_HOST_CONNECTION;
1350 hostCon->lock();
1351
1352 char *cpu_addr = (char*)cb->getBufferPtr() + getAshmemColorOffset(cb);
1353
1354 if (cb->lockedWidth < cb->width || cb->lockedHeight < cb->height) {
1355 updateHostColorBuffer(cb, true, cpu_addr);
1356 }
1357 else {
1358 updateHostColorBuffer(cb, false, cpu_addr);
1359 }
1360
1361 hostCon->unlock();
1362 DD("gralloc_unlock success. cpu_addr: %p", cpu_addr);
1363 }
1364
1365 cb->lockedWidth = cb->lockedHeight = 0;
1366 return 0;
1367 }
1368
1369 #if PLATFORM_SDK_VERSION >= 18
1370 static int gralloc_lock_ycbcr(gralloc_module_t const* module,
1371 buffer_handle_t handle, int usage,
1372 int l, int t, int w, int h,
1373 android_ycbcr *ycbcr)
1374 {
1375 // Not supporting fallback module for YCbCr
1376 if (sFallback != NULL) {
1377 ALOGD("%s: has fallback, return -EINVAL", __FUNCTION__);
1378 return -EINVAL;
1379 }
1380
1381 if (!ycbcr) {
1382 ALOGE("%s: got NULL ycbcr struct! -EINVAL", __FUNCTION__);
1383 return -EINVAL;
1384 }
1385
1386 private_module_t *gr = (private_module_t *)module;
1387 if (!gr) {
1388 return -EINVAL;
1389 }
1390
1391 cb_handle_old_t *cb = cb_handle_old_t::from_unconst(handle);
1392 if (!cb) {
1393 ALOGE("%s: bad colorbuffer handle. -EINVAL", __FUNCTION__);
1394 return -EINVAL;
1395 }
1396
1397 if (cb->format != HAL_PIXEL_FORMAT_YV12 &&
1398 cb->format != HAL_PIXEL_FORMAT_YCbCr_420_888) {
1399 ALOGE("gralloc_lock_ycbcr can only be used with "
1400 "HAL_PIXEL_FORMAT_YCbCr_420_888 or HAL_PIXEL_FORMAT_YV12, got %x instead. "
1401 "-EINVAL",
1402 cb->format);
1403 return -EINVAL;
1404 }
1405
1406 usage |= (cb->usage & GRALLOC_USAGE_HW_CAMERA_MASK);
1407
1408 void *vaddr;
1409 int ret = gralloc_lock(module, handle, usage, l, t, w, h, &vaddr);
1410 if (ret) {
1411 return ret;
1412 }
1413
1414 uint8_t* cpu_addr = static_cast<uint8_t*>(vaddr);
1415
1416 // Calculate offsets to underlying YUV data
1417 size_t yStride;
1418 size_t cStride;
1419 size_t cSize;
1420 size_t yOffset;
1421 size_t uOffset;
1422 size_t vOffset;
1423 size_t cStep;
1424 size_t align;
1425 switch (cb->format) {
1426 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
1427 yStride = cb->width;
1428 cStride = cb->width;
1429 yOffset = 0;
1430 vOffset = yStride * cb->height;
1431 uOffset = vOffset + 1;
1432 cStep = 2;
1433 break;
1434 case HAL_PIXEL_FORMAT_YV12:
1435 // https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12
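// Illustrative example: width=640, height=480 gives yStride=640,
// cStride=320, cSize=320*240=76800, vOffset=307200, uOffset=384000.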
1436 align = 16;
1437 yStride = (cb->width + (align -1)) & ~(align-1);
1438 cStride = (yStride / 2 + (align - 1)) & ~(align-1);
1439 yOffset = 0;
1440 cSize = cStride * cb->height/2;
1441 vOffset = yStride * cb->height;
1442 uOffset = vOffset + cSize;
1443 cStep = 1;
1444 break;
1445 case HAL_PIXEL_FORMAT_YCbCr_420_888:
1446 yStride = cb->width;
1447 cStride = yStride / 2;
1448 yOffset = 0;
1449 uOffset = cb->height * yStride;
1450 vOffset = uOffset + cStride * cb->height / 2;
1451 cStep = 1;
1452 break;
1453 default:
1454 ALOGE("gralloc_lock_ycbcr unexpected internal format %x",
1455 cb->format);
1456 return -EINVAL;
1457 }
1458
1459 ycbcr->y = cpu_addr + yOffset;
1460 ycbcr->cb = cpu_addr + uOffset;
1461 ycbcr->cr = cpu_addr + vOffset;
1462 ycbcr->ystride = yStride;
1463 ycbcr->cstride = cStride;
1464 ycbcr->chroma_step = cStep;
1465
1466 // Zero out reserved fields
1467 memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));
1468
1469 DD("gralloc_lock_ycbcr success. usage: %x, ycbcr.y: %p, .cb: %p, .cr: %p, "
1470 ".ystride: %d , .cstride: %d, .chroma_step: %d, base: %p", usage,
1471 ycbcr->y, ycbcr->cb, ycbcr->cr, ycbcr->ystride, ycbcr->cstride,
1472 ycbcr->chroma_step, cb->getBufferPtr());
1473
1474 return 0;
1475 }
1476 #endif // PLATFORM_SDK_VERSION >= 18
1477
1478 static int gralloc_device_open(const hw_module_t* module,
1479 const char* name,
1480 hw_device_t** device)
1481 {
1482 int status = -EINVAL;
1483
1484 D("gralloc_device_open %s\n", name);
1485
1486 pthread_once( &sFallbackOnce, fallback_init );
1487 if (sFallback != NULL) {
1488 return sFallback->common.methods->open(&sFallback->common, name, device);
1489 }
1490
1491 if (!strcmp(name, GRALLOC_HARDWARE_GPU0)) {
1492
1493 // Create the host connection and cache it for later calls;
1494 // return an error if a connection with the host cannot be established
1495 HostConnection *hostConn = createOrGetHostConnection();
1496 if (!hostConn) {
1497 ALOGE("gralloc: failed to get host connection while opening %s\n", name);
1498 return -EIO;
1499 }
1500
1501 //
1502 // Allocate memory for the gralloc device (alloc interface)
1503 //
1504 gralloc_device_t *dev = new gralloc_device_t;
1505 if (NULL == dev) {
1506 return -ENOMEM;
1507 }
1508
1509 // Initialize our device structure
1510 //
1511 dev->device.common.tag = HARDWARE_DEVICE_TAG;
1512 dev->device.common.version = 0;
1513 dev->device.common.module = const_cast<hw_module_t*>(module);
1514 dev->device.common.close = gralloc_device_close;
1515
1516 dev->device.alloc = gralloc_alloc;
1517 dev->device.free = gralloc_free;
1518 dev->device.dump = gralloc_dump;
1519 pthread_mutex_init(&dev->lock, NULL);
1520
1521 *device = &dev->device.common;
1522 status = 0;
1523 }
1524
1525 return status;
1526 }
1527
1528 //
1529 // define the HMI symbol - our module interface
1530 //
1531 static struct hw_module_methods_t gralloc_module_methods = {
1532 .open = gralloc_device_open,
1533 };
1534
1535 struct private_module_t HAL_MODULE_INFO_SYM = {
1536 base: {
1537 common: {
1538 tag: HARDWARE_MODULE_TAG,
1539 #if PLATFORM_SDK_VERSION >= 18
1540 module_api_version: GRALLOC_MODULE_API_VERSION_0_2,
1541 hal_api_version: 0,
1542 #elif PLATFORM_SDK_VERSION >= 16
1543 module_api_version: 1,
1544 hal_api_version: 0,
1545 #else // PLATFORM_SDK_VERSION
1546 version_major: 1,
1547 version_minor: 0,
1548 #endif // PLATFORM_SDK_VERSION
1549 id: GRALLOC_HARDWARE_MODULE_ID,
1550 name: "Graphics Memory Allocator Module",
1551 author: "The Android Open Source Project",
1552 methods: &gralloc_module_methods,
1553 dso: NULL,
1554 reserved: {0, }
1555 },
1556 registerBuffer: gralloc_register_buffer,
1557 unregisterBuffer: gralloc_unregister_buffer,
1558 lock: gralloc_lock,
1559 unlock: gralloc_unlock,
1560 perform: NULL,
1561 #if PLATFORM_SDK_VERSION >= 18
1562 lock_ycbcr: gralloc_lock_ycbcr,
1563 #endif // PLATFORM_SDK_VERSION >= 18
1564 #if PLATFORM_SDK_VERSION >= 29 // For Q and later
1565 getTransportSize: NULL,
1566 validateBufferSize: NULL,
1567 #endif // PLATFORM_SDK_VERSION >= 29
1568 }
1569 };
1570
1571 /* This function is called once to detect whether the emulator supports
1572 * GPU emulation (this is done by looking at the ro.boot.qemu.gles boot
1573 * property, which is 1 when host-side GPU emulation is available).
1574 *
1575 * If not, then load gralloc.default instead as a fallback.
1576 */
1577
1578 #if __LP64__
1579 static const char kGrallocDefaultSystemPath[] = "/system/lib64/hw/gralloc.goldfish.default.so";
1580 static const char kGrallocDefaultVendorPath[] = "/vendor/lib64/hw/gralloc.goldfish.default.so";
1581 static const char kGrallocDefaultSystemPathPreP[] = "/system/lib64/hw/gralloc.default.so";
1582 static const char kGrallocDefaultVendorPathPreP[] = "/vendor/lib64/hw/gralloc.default.so";
1583 #else
1584 static const char kGrallocDefaultSystemPath[] = "/system/lib/hw/gralloc.goldfish.default.so";
1585 static const char kGrallocDefaultVendorPath[] = "/vendor/lib/hw/gralloc.goldfish.default.so";
1586 static const char kGrallocDefaultSystemPathPreP[] = "/system/lib/hw/gralloc.default.so";
1587 static const char kGrallocDefaultVendorPathPreP[] = "/vendor/lib/hw/gralloc.default.so";
1588 #endif
1589
1590 static void
1591 fallback_init(void)
1592 {
1593 char prop[PROPERTY_VALUE_MAX];
1594 void* module;
1595
1596 // Cuttlefish case: no fallback needed. With software rendering this
1597 // library is not used at all (minigbm is used instead).
1598 property_get("ro.boot.hardware", prop, "");
1599
1600 bool isValid = prop[0] != '\0';
1601
1602 if (isValid && !strcmp(prop, "cutf_cvm")) {
1603 return;
1604 }
1605
1606 // qemu.gles=0 -> no GLES 2.x support (only 1.x through software).
1607 // qemu.gles=1 -> host-side GPU emulation through EmuGL
1608 // qemu.gles=2 -> guest-side GPU emulation.
1609 property_get("ro.boot.qemu.gles", prop, "999");
1610
1611 bool useFallback = false;
1612 switch (atoi(prop)) {
1613 case 0:
1614 useFallback = true;
1615 break;
1616 case 1:
1617 useFallback = false;
1618 break;
1619 case 2:
1620 useFallback = true;
1621 break;
1622 default:
1623 useFallback = false;
1624 break;
1625 }
1626
1627 if (!useFallback) return;
1628
1629 ALOGD("Emulator without host-side GPU emulation detected. "
1630 "Loading gralloc.default.so from %s...",
1631 kGrallocDefaultVendorPath);
1632 module = dlopen(kGrallocDefaultVendorPath, RTLD_LAZY | RTLD_LOCAL);
1633 if (!module) {
1634 module = dlopen(kGrallocDefaultVendorPathPreP, RTLD_LAZY | RTLD_LOCAL);
1635 }
1636 if (!module) {
1637 // vendor folder didn't work. try system
1638 ALOGD("gralloc.default.so not found in /vendor. Trying %s...",
1639 kGrallocDefaultSystemPath);
1640 module = dlopen(kGrallocDefaultSystemPath, RTLD_LAZY | RTLD_LOCAL);
1641 if (!module) {
1642 module = dlopen(kGrallocDefaultSystemPathPreP, RTLD_LAZY | RTLD_LOCAL);
1643 }
1644 }
1645
1646 if (module != NULL) {
1647 sFallback = reinterpret_cast<gralloc_module_t*>(dlsym(module, HAL_MODULE_INFO_SYM_AS_STR));
1648 if (sFallback == NULL) {
1649 dlclose(module);
1650 }
1651 }
1652 if (sFallback == NULL) {
1653 ALOGE("FATAL: Could not find gralloc.default.so!");
1654 }
1655 }
1656