/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <BufferAllocator/BufferAllocator.h>
#include "dmabuf_heap_test.h"

#include <linux/ion.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <gtest/gtest.h>

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <vintf/VintfObject.h>

#include <memory>
#include <thread>
#include <vector>
class DmaBufHeapConcurrentAccessTest : public ::testing::Test {
  public:
    virtual void SetUp() { allocator = new BufferAllocator(); }

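    /*
     * Allocates a 4 KiB buffer from the system heap, maps it into the
     * CPU address space, performs a CpuSyncStart/CpuSyncEnd round trip,
     * and then unmaps and closes the buffer. Failures surface through
     * gtest assertions.
     */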
    void DoAlloc(bool cpu_access_needed) {
        static const size_t kAllocSizeInBytes = 4096;
        int map_fd = allocator->AllocSystem(cpu_access_needed, kAllocSizeInBytes);
        ASSERT_GE(map_fd, 0);

        void* ptr = mmap(NULL, kAllocSizeInBytes, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        int ret = allocator->CpuSyncStart(map_fd, kSyncReadWrite);
        ASSERT_EQ(0, ret);

        ret = allocator->CpuSyncEnd(map_fd, kSyncReadWrite);
        ASSERT_EQ(0, ret);

        ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
        ASSERT_EQ(0, close(map_fd));
    }

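    /* Runs one cached (CPU-access) and one uncached allocation back to back. */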
    void DoConcurrentAlloc() {
        DoAlloc(true /* cpu_access_needed */);
        DoAlloc(false /* cpu_access_needed */);
    }

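    /*
     * Like DoConcurrentAlloc(), but re-registers the legacy ion
     * name-to-heap mappings before each allocation, so that concurrent
     * MapNameToIonHeap() calls are exercised alongside the allocations.
     */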
    void DoConcurrentAllocWithMapName() {
        allocator->MapNameToIonHeap(kDmabufSystemHeapName, "" /* no mapping for non-legacy */,
                                    0 /* no mapping for non-legacy ion */,
                                    ~0 /* legacy ion heap mask */, ION_FLAG_CACHED);
        DoAlloc(true /* cpu_access_needed */);
        allocator->MapNameToIonHeap(
                kDmabufSystemUncachedHeapName, "" /* no mapping for non-legacy */,
                0 /* no mapping for non-legacy ion */, ~0 /* legacy ion heap mask */);
        DoAlloc(false /* cpu_access_needed */);
    }

    virtual void TearDown() { delete allocator; }

    BufferAllocator* allocator = nullptr;
};

static constexpr size_t NUM_CONCURRENT_THREADS = 100;

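/*
 * Allocate, map, sync, and free system-heap buffers from
 * NUM_CONCURRENT_THREADS threads in parallel. Skipped on kernels older
 * than 5.10, which are not required to provide DMA-BUF heaps.
 */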
TEST_F(DmaBufHeapConcurrentAccessTest, ConcurrentAllocTest) {
    using android::vintf::KernelVersion;

    KernelVersion min_kernel_version = KernelVersion(5, 10, 0);
    KernelVersion kernel_version =
            android::vintf::VintfObject::GetInstance()
                    ->getRuntimeInfo(android::vintf::RuntimeInfo::FetchFlag::CPU_VERSION)
                    ->kernelVersion();
    if (kernel_version < min_kernel_version) {
        GTEST_SKIP();
    }

    std::vector<std::thread> threads(NUM_CONCURRENT_THREADS);
    for (size_t i = 0; i < NUM_CONCURRENT_THREADS; i++) {
        threads[i] = std::thread(&DmaBufHeapConcurrentAccessTest::DoConcurrentAlloc, this);
    }

    for (auto& thread : threads) {
        thread.join();
    }
}

TEST_F(DmaBufHeapConcurrentAccessTest, ConcurrentAllocWithMapNameTest) {
    std::vector<std::thread> threads(NUM_CONCURRENT_THREADS);
    for (size_t i = 0; i < NUM_CONCURRENT_THREADS; i++) {
        threads[i] =
                std::thread(&DmaBufHeapConcurrentAccessTest::DoConcurrentAllocWithMapName, this);
    }

    for (auto& thread : threads) {
        thread.join();
    }
}

DmaBufHeapTest::DmaBufHeapTest() : allocator(new BufferAllocator()) {
    /*
     * Legacy ion devices may have hardcoded heap IDs that do not
     * match the ion UAPI header. Map the heap names 'system' and
     * 'system-uncached' to a heap mask of all 1s so that these devices
     * will allocate from the first available heap when asked to
     * allocate from the system or system-uncached heap.
     */
    if (BufferAllocator::CheckIonSupport()) {
        allocator->MapNameToIonHeap(kDmabufSystemHeapName, "" /* no mapping for non-legacy */,
                                    0 /* no mapping for non-legacy ion */,
                                    ~0 /* legacy ion heap mask */);
        allocator->MapNameToIonHeap(
                kDmabufSystemUncachedHeapName, "" /* no mapping for non-legacy */,
                0 /* no mapping for non-legacy ion */, ~0 /* legacy ion heap mask */);
    }
}
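/*
 * With those mappings in place, a minimal usage sketch (not part of the
 * tests below) looks the same whether the device backs allocations with
 * DMA-BUF heaps or legacy ion:
 *
 *     BufferAllocator allocator;
 *     int fd = allocator.Alloc(kDmabufSystemHeapName, 4096);
 *     if (fd >= 0) close(fd);
 */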

TEST_F(DmaBufHeapTest, Allocate) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (bool cpu_access_needed : {false, true}) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "cpu_access_needed: " << cpu_access_needed << " size: " << size);
            int fd = allocator->AllocSystem(cpu_access_needed, size);
            ASSERT_GE(fd, 0);
            ASSERT_EQ(close(fd), 0);  // free the buffer
        }
    }
}

TEST_F(DmaBufHeapTest, AllocateCachedNeedsSync) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (size_t size : allocationSizes) {
        SCOPED_TRACE(::testing::Message()
                     << "heap: " << kDmabufSystemHeapName << " size: " << size);
        int fd = allocator->Alloc(kDmabufSystemHeapName, size, ION_FLAG_CACHED_NEEDS_SYNC
                                  /* ion heap flags will be ignored if using dmabuf heaps */);
        ASSERT_GE(fd, 0);
        ASSERT_EQ(close(fd), 0);  // free the buffer
    }
}

TEST_F(DmaBufHeapTest, RepeatedAllocate) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (bool cpu_access_needed : {false, true}) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "cpu_access_needed: " << cpu_access_needed << " size: " << size);
            for (unsigned int i = 0; i < 1024; i++) {
                SCOPED_TRACE(::testing::Message() << "iteration " << i);
                int fd = allocator->AllocSystem(cpu_access_needed, size);
                ASSERT_GE(fd, 0);
                ASSERT_EQ(close(fd), 0);  // free the buffer
            }
        }
    }
}

/*
 * Make sure all heaps always return zeroed pages: dirty a batch of
 * buffers with a non-zero pattern, free them, then allocate again and
 * verify that the newly returned buffer reads back as all zeroes.
 */
TEST_F(DmaBufHeapTest, Zeroed) {
    static const size_t kAllocSizeInBytes = 4096;
    static const size_t kNumFds = 16;

    auto zeroes_ptr = std::make_unique<char[]>(kAllocSizeInBytes);
    int fds[kNumFds];
    int ret = 0, map_fd = -1;
    for (unsigned int i = 0; i < kNumFds; i++) {
        map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
        ASSERT_GE(map_fd, 0);

        void* ptr = mmap(NULL, kAllocSizeInBytes, PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        ret = allocator->CpuSyncStart(map_fd, kSyncWrite);
        ASSERT_EQ(0, ret);

        memset(ptr, 0xaa, kAllocSizeInBytes);

        ret = allocator->CpuSyncEnd(map_fd, kSyncWrite);
        ASSERT_EQ(0, ret);

        ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
        fds[i] = map_fd;
    }

    for (unsigned int i = 0; i < kNumFds; i++) {
        ASSERT_EQ(0, close(fds[i]));
    }

    map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
    ASSERT_GE(map_fd, 0);

    void* ptr = mmap(NULL, kAllocSizeInBytes, PROT_READ, MAP_SHARED, map_fd, 0);
    ASSERT_TRUE(ptr != MAP_FAILED);

    ret = allocator->CpuSyncStart(map_fd);
    ASSERT_EQ(0, ret);

    ASSERT_EQ(0, memcmp(ptr, zeroes_ptr.get(), kAllocSizeInBytes));

    ret = allocator->CpuSyncEnd(map_fd);
    ASSERT_EQ(0, ret);

    ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
    ASSERT_EQ(0, close(map_fd));
}

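/*
 * Exercise a CpuSyncStart/CpuSyncEnd round trip for every supported sync
 * type on a mapped system-heap buffer.
 */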
TEST_F(DmaBufHeapTest, TestCpuSync) {
    static const size_t kAllocSizeInBytes = 4096;
    auto vec_sync_type = {kSyncRead, kSyncWrite, kSyncReadWrite};
    for (auto sync_type : vec_sync_type) {
        int map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
        ASSERT_GE(map_fd, 0);

        void* ptr = mmap(NULL, kAllocSizeInBytes, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        int ret = allocator->CpuSyncStart(map_fd, sync_type);
        ASSERT_EQ(0, ret);

        ret = allocator->CpuSyncEnd(map_fd, sync_type);
        ASSERT_EQ(0, ret);

        ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
        ASSERT_EQ(0, close(map_fd));
    }
}

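/*
 * Custom CPU sync callbacks for the legacy ion path. When a callback is
 * passed to CpuSyncStart()/CpuSyncEnd(), it is only consulted when the
 * allocator falls back to legacy ion; on DMA-BUF heap devices the
 * default sync ioctl path is used instead.
 */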
int CustomCpuSyncStart(int /* ion_fd */, int /* dma_buf fd */,
                       void* /* custom_data pointer */) {
    LOG(INFO) << "In custom cpu sync start callback";
    return 0;
}

int CustomCpuSyncEnd(int /* ion_fd */, int /* dma_buf fd */,
                     void* /* custom_data pointer */) {
    LOG(INFO) << "In custom cpu sync end callback";
    return 0;
}

TEST_F(DmaBufHeapTest, TestCustomLegacyIonSyncCallback) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (size_t size : allocationSizes) {
        SCOPED_TRACE(::testing::Message()
                     << "heap: " << kDmabufSystemHeapName << " size: " << size);

        int map_fd = allocator->Alloc(kDmabufSystemHeapName, size);
        ASSERT_GE(map_fd, 0);

        void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        int ret = allocator->CpuSyncStart(map_fd, kSyncWrite, CustomCpuSyncStart);
        ASSERT_EQ(0, ret);

        memset(ptr, 0xaa, size);

        ret = allocator->CpuSyncEnd(map_fd, kSyncWrite, CustomCpuSyncEnd);
        ASSERT_EQ(0, ret);

        ASSERT_EQ(0, munmap(ptr, size));
        ASSERT_EQ(0, close(map_fd));
    }
}

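/*
 * Every device must support at least one of the two allocation backends:
 * DMA-BUF heaps or ion.
 */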
TEST_F(DmaBufHeapTest, TestDeviceCapabilityCheck) {
    auto heap_list = allocator->GetDmabufHeapList();

    ASSERT_TRUE(!heap_list.empty() || BufferAllocator::CheckIonSupport());
}

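/*
 * Devices with a vendor API level of S or later and a 5.10+ kernel must
 * expose a DMA-BUF system heap that can be allocated from, can be
 * mmapped by the CPU, and returns zeroed memory.
 */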
TEST_F(DmaBufHeapTest, TestDmabufSystemHeapCompliance) {
    using android::vintf::KernelVersion;

    if (android::base::GetIntProperty("ro.vendor.api_level", 0) < __ANDROID_API_S__) {
        GTEST_SKIP();
    }

    KernelVersion min_kernel_version = KernelVersion(5, 10, 0);
    KernelVersion kernel_version =
            android::vintf::VintfObject::GetInstance()
                    ->getRuntimeInfo(android::vintf::RuntimeInfo::FetchFlag::CPU_VERSION)
                    ->kernelVersion();
    if (kernel_version < min_kernel_version) {
        GTEST_SKIP();
    }

    auto heap_list = allocator->GetDmabufHeapList();
    ASSERT_TRUE(heap_list.find("system") != heap_list.end());

    for (bool cpu_access_needed : {false, true}) {
        static const size_t kAllocSizeInBytes = 4096;
        /*
         * Test that the system heap can be allocated from.
         */
        SCOPED_TRACE(::testing::Message() << "cpu_access_needed: " << cpu_access_needed);
        int map_fd = allocator->AllocSystem(cpu_access_needed, kAllocSizeInBytes);
        ASSERT_GE(map_fd, 0);

        /*
         * Test that the system heap can be mmapped by the CPU.
         */
        void* ptr = mmap(NULL, kAllocSizeInBytes, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        /*
         * Test that the allocated memory is zeroed.
         */
        auto zeroes_ptr = std::make_unique<char[]>(kAllocSizeInBytes);
        int ret = allocator->CpuSyncStart(map_fd);
        ASSERT_EQ(0, ret);

        ASSERT_EQ(0, memcmp(ptr, zeroes_ptr.get(), kAllocSizeInBytes));

        ret = allocator->CpuSyncEnd(map_fd);
        ASSERT_EQ(0, ret);

        ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
        ASSERT_EQ(0, close(map_fd));
    }
}