// Copyright 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "aemu/base/ring_buffer.h"

#include "aemu/base/system/System.h"
#include "aemu/base/threads/FunctorThread.h"

#include <gtest/gtest.h>

#include <random>

#include <errno.h>
#ifdef _MSC_VER
#include "aemu/base/msvc.h"
#else
#include <sys/time.h>
#endif

namespace android {
namespace base {

TEST(ring_buffer, Init) {
    ring_buffer r;
    ring_buffer_init(&r);
}

static constexpr size_t kNumElts = 65536;

// Tests that a large buffer can be produced and consumed,
// in a single thread.
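// ring_buffer_write()/ring_buffer_read() take (data, step_size, steps) and
// return the number of whole steps transferred; on a short transfer the test
// expects errno to be -EAGAIN.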
TEST(ring_buffer, ProduceConsume) {
    std::default_random_engine generator;
    generator.seed(0);

    std::vector<uint8_t> elements(kNumElts);

    // int rather than uint8_t: uniform_int_distribution is not required to
    // support 8-bit element types on every toolchain.
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    for (size_t i = 0; i < kNumElts; ++i) {
        elements[i] =
            static_cast<uint8_t>(
                eltDistribution(generator));
    }

    std::vector<uint8_t> result(kNumElts);

    ring_buffer r;
    ring_buffer_init(&r);

    size_t written = 0;
    size_t read = 0;

    int i = 0;
    while (written < kNumElts) {
        ++i;

        // Safety factor; we do not expect the ring buffer
        // implementation to be this hangy if used this way.
        if (i > kNumElts * 10) {
            FAIL() << "Error: too many iterations. Hanging?";
            return;
        }

        uint32_t toWrite = kNumElts - written;
        long writtenThisTime =
            ring_buffer_write(&r, elements.data() + written, 1, toWrite);
        written += writtenThisTime;

        if (writtenThisTime < toWrite) {
            EXPECT_EQ(-EAGAIN, errno);
        }

        uint32_t toRead = kNumElts - read;
        long readThisTime =
            ring_buffer_read(&r, result.data() + read, 1, toRead);
        read += readThisTime;

        if (readThisTime < toRead) {
            EXPECT_EQ(-EAGAIN, errno);
        }
    }

    EXPECT_EQ(elements, result);
}

// General function to pass to FunctorThread to read/write
// data completely to/from a ring buffer.
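// Each helper retries until all numSteps steps of stepSize bytes have been
// transferred, treating a short transfer (signaled via -EAGAIN) as "try again".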
static void writeTest(ring_buffer* r, const uint8_t* data, size_t stepSize, size_t numSteps) {
    size_t stepsWritten = 0;
    size_t bytes = stepSize * numSteps;
    int i = 0;
    while (stepsWritten < numSteps) {
        ++i;

        // Safety factor; we do not expect the ring buffer
        // implementation to be this hangy if used this way.
        if (i > bytes * 10) {
            FAIL() << "Error: too many iterations. Hanging?";
            return;
        }

        uint32_t stepsRemaining = numSteps - stepsWritten;
        long stepsWrittenThisTime =
            ring_buffer_write(r,
                data + stepSize * stepsWritten,
                stepSize, stepsRemaining);
        stepsWritten += stepsWrittenThisTime;

        if (stepsWrittenThisTime < stepsRemaining) {
            EXPECT_EQ(-EAGAIN, errno);
        }
    }
}

static void readTest(ring_buffer* r, uint8_t* data, size_t stepSize, size_t numSteps) {
    size_t stepsRead = 0;
    size_t bytes = stepSize * numSteps;
    int i = 0;
    while (stepsRead < numSteps) {
        ++i;

        // Safety factor; we do not expect the ring buffer
        // implementation to be this hangy if used this way.
        if (i > bytes * 10) {
            FAIL() << "Error: too many iterations. Hanging?";
            return;
        }

        uint32_t stepsRemaining = numSteps - stepsRead;
        long stepsReadThisTime =
            ring_buffer_read(r,
                data + stepSize * stepsRead,
                stepSize, stepsRemaining);
        stepsRead += stepsReadThisTime;

        if (stepsReadThisTime < stepsRemaining) {
            EXPECT_EQ(-EAGAIN, errno);
        }
    }
}

// Tests transmission of a large buffer where
// the producer is in one thread
// while the consumer is in another thread.
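// Only the consumer is joined: it cannot finish reading until the producer
// has written every byte, so that alone guarantees the data is complete.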
TEST(ring_buffer, ProduceConsumeMultiThread) {
    std::default_random_engine generator;
    generator.seed(0);

    std::vector<uint8_t> elements(kNumElts);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    for (size_t i = 0; i < kNumElts; ++i) {
        elements[i] =
            static_cast<uint8_t>(
                eltDistribution(generator));
    }

    std::vector<uint8_t> result(kNumElts, 0);

    ring_buffer r;
    ring_buffer_init(&r);

    FunctorThread producer([&r, &elements]() {
        writeTest(&r, (uint8_t*)elements.data(), 1, kNumElts);
    });

    FunctorThread consumer([&r, &result]() {
        readTest(&r, (uint8_t*)result.data(), 1, kNumElts);
    });

    producer.start();
    consumer.start();

    consumer.wait();

    EXPECT_EQ(elements, result);
}

// Tests various step sizes of ring buffer transmission.
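// kNumElts (65536) is divisible by every step size below, so each run moves
// exactly numSteps * stepSize == kNumElts bytes.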
TEST(ring_buffer, DISABLED_ProduceConsumeMultiThreadVaryingStepSize) {
    std::default_random_engine generator;
    generator.seed(0);

    std::vector<uint8_t> elements(kNumElts);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    for (size_t i = 0; i < kNumElts; ++i) {
        elements[i] =
            static_cast<uint8_t>(
                eltDistribution(generator));
    }

    static constexpr size_t kStepSizesToTest[] = {
        1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
    };

    for (auto stepSize : kStepSizesToTest) {
        size_t numSteps = kNumElts / stepSize;

        std::vector<uint8_t> result(kNumElts, 0);

        ring_buffer r;
        ring_buffer_init(&r);

        FunctorThread producer([&r, &elements, stepSize, numSteps]() {
            writeTest(&r, (uint8_t*)elements.data(), stepSize, numSteps);
        });

        FunctorThread consumer([&r, &result, stepSize, numSteps]() {
            readTest(&r, (uint8_t*)result.data(), stepSize, numSteps);
        });

        producer.start();
        consumer.start();

        consumer.wait();

        EXPECT_EQ(elements, result);
    }
}

static void viewWriteTest(ring_buffer* r, ring_buffer_view* v, const uint8_t* data, size_t stepSize, size_t numSteps) {
    size_t stepsWritten = 0;
    size_t bytes = stepSize * numSteps;
    int i = 0;
    while (stepsWritten < numSteps) {
        ++i;

        // Safety factor; we do not expect the ring buffer
        // implementation to be this hangy if used this way.
        if (i > bytes * 10) {
            FAIL() << "Error: too many iterations. Hanging?";
            return;
        }

        uint32_t stepsRemaining = numSteps - stepsWritten;
        long stepsWrittenThisTime =
            ring_buffer_view_write(r, v,
                data + stepSize * stepsWritten,
                stepSize, stepsRemaining);
        stepsWritten += stepsWrittenThisTime;

        if (stepsWrittenThisTime < stepsRemaining) {
            EXPECT_EQ(-EAGAIN, errno);
        }
    }
}

static void viewReadTest(ring_buffer* r, ring_buffer_view* v, uint8_t* data, size_t stepSize, size_t numSteps) {
    size_t stepsRead = 0;
    size_t bytes = stepSize * numSteps;
    int i = 0;
    while (stepsRead < numSteps) {
        ++i;

        // Safety factor; we do not expect the ring buffer
        // implementation to be this hangy if used this way.
        if (i > bytes * 10) {
            FAIL() << "Error: too many iterations. Hanging?";
            return;
        }

        uint32_t stepsRemaining = numSteps - stepsRead;
        long stepsReadThisTime =
            ring_buffer_view_read(r, v,
                data + stepSize * stepsRead,
                stepSize, stepsRemaining);
        stepsRead += stepsReadThisTime;

        if (stepsReadThisTime < stepsRemaining) {
            EXPECT_EQ(-EAGAIN, errno);
        }
    }
}

// Tests ring_buffer_calc_shift.
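// Per the expectations below, the computed shift is floor(log2(size)).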
TEST(ring_buffer, CalcShift) {
    EXPECT_EQ(0, ring_buffer_calc_shift(1));
    EXPECT_EQ(1, ring_buffer_calc_shift(2));
    EXPECT_EQ(1, ring_buffer_calc_shift(3));
    EXPECT_EQ(2, ring_buffer_calc_shift(4));
    EXPECT_EQ(2, ring_buffer_calc_shift(5));
    EXPECT_EQ(2, ring_buffer_calc_shift(6));
    EXPECT_EQ(2, ring_buffer_calc_shift(7));
    EXPECT_EQ(3, ring_buffer_calc_shift(8));
}

// Tests usage of ring buffer with view.
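// The view API operates on a caller-provided backing buffer; the 8193-byte
// buffer used below deliberately exercises a non-power-of-2 capacity.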
TEST(ring_buffer, ProduceConsumeMultiThreadVaryingStepSizeWithView) {
    std::default_random_engine generator;
    generator.seed(0);

    std::vector<uint8_t> elements(kNumElts);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    for (size_t i = 0; i < kNumElts; ++i) {
        elements[i] =
            static_cast<uint8_t>(
                eltDistribution(generator));
    }

    static constexpr size_t kStepSizesToTest[] = {
        1, 2, 4, 8, 16, 32, 64,
        1024, 2048, 4096,
    };

    for (auto stepSize : kStepSizesToTest) {
        size_t numSteps = kNumElts / stepSize;

        std::vector<uint8_t> result(kNumElts, 0);

        // non power of 2
        std::vector<uint8_t> buf(8193, 0);

        ring_buffer r;
        ring_buffer_view v;
        ring_buffer_view_init(&r, &v, buf.data(), buf.size());

        FunctorThread producer([&r, &v, &elements, stepSize, numSteps]() {
            viewWriteTest(&r, &v, (uint8_t*)elements.data(), stepSize, numSteps);
        });

        FunctorThread consumer([&r, &v, &result, stepSize, numSteps]() {
            viewReadTest(&r, &v, (uint8_t*)result.data(), stepSize, numSteps);
        });

        producer.start();
        consumer.start();

        consumer.wait();

        EXPECT_EQ(elements, result);
    }
}

// Tests that wait works as expected
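// On a freshly initialized (empty) ring there is room to write but nothing to
// read, so the write waits succeed and the read waits fail.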
TEST(ring_buffer, Wait) {
    ring_buffer r;
    ring_buffer_init(&r);

    EXPECT_TRUE(ring_buffer_wait_write(&r, nullptr, 1, 0));
    EXPECT_FALSE(ring_buffer_wait_read(&r, nullptr, 1, 0));

    EXPECT_TRUE(ring_buffer_wait_write(&r, nullptr, 1, 100));
    EXPECT_FALSE(ring_buffer_wait_read(&r, nullptr, 1, 100));
}

// Tests the read/write fully operations
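// The *_fully variants are expected to block until the entire request has
// been transferred, hence no retry loop is needed around them below.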
TEST(ring_buffer, FullReadWrite) {
    ring_buffer r;
    ring_buffer_init(&r);

    std::default_random_engine generator;
    generator.seed(0);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        testSizeDistribution(1, 8192);

    std::uniform_int_distribution<int>
        bufSizeDistribution(256, 8192);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    size_t trials = 1000;

    for (size_t i = 0; i < trials; ++i) {
        size_t testSize =
            testSizeDistribution(generator);
        size_t bufSize =
            bufSizeDistribution(generator);

        std::vector<uint8_t> elements(testSize);
        std::vector<uint8_t> result(testSize);
        std::vector<uint8_t> buf(bufSize, 0);

        ring_buffer r;
        ring_buffer_view v;
        ring_buffer_view_init(&r, &v, buf.data(), buf.size());

        FunctorThread producer([&r, &v, &elements]() {
            ring_buffer_write_fully(&r, &v, elements.data(), elements.size());
        });

        FunctorThread consumer([&r, &v, &result]() {
            ring_buffer_read_fully(&r, &v, result.data(), result.size());
        });

        producer.start();
        consumer.start();

        consumer.wait();

        EXPECT_EQ(elements, result);
    }
}

// Tests synchronization with the producer driving most things, along with
// consumer hangup: the consumer handles a bit of traffic, hangs up whenever
// nothing is readable, and re-engages on demand.
// Currently disabled because it hangs on Windows.
// TODO(lfy@): figure out why it hangs on Windows
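// In the loop below, the consumer brackets its idle periods with
// ring_buffer_consumer_hangup()/ring_buffer_consumer_hung_up(), while the
// producer pairs ring_buffer_producer_acquire()/ring_buffer_producer_idle()
// and waits out hangups via ring_buffer_producer_wait_hangup().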
TEST(ring_buffer, DISABLED_ProducerDrivenSync) {
    std::default_random_engine generator;
    generator.seed(0);
    std::vector<uint8_t> elements(kNumElts);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    for (size_t i = 0; i < kNumElts; ++i) {
        elements[i] =
            static_cast<uint8_t>(
                eltDistribution(generator));
    }

    std::vector<uint8_t> result(kNumElts);

    ring_buffer r;
    ring_buffer_init(&r);
    ring_buffer_sync_init(&r);
    size_t read = 0;
    const size_t totalTestLength = kNumElts * 64;

    FunctorThread consumer([&r, &result, &read]() {
        while (read < totalTestLength) {
            if (ring_buffer_wait_read(&r, nullptr, 1, 1)) {
                ring_buffer_read_fully(
                    &r, nullptr, result.data() + (read % result.size()), 1);
                ++read;
            } else {
                if (!ring_buffer_consumer_hangup(&r)) {
                    EXPECT_NE(RING_BUFFER_SYNC_CONSUMER_HANGING_UP, r.state);
                    ring_buffer_consumer_wait_producer_idle(&r);
                    while (ring_buffer_can_read(&r, 1)) {
                        ring_buffer_read_fully(
                            &r, nullptr, result.data() + (read % result.size()), 1);
                        ++read;
                    }
                }
                ring_buffer_consumer_hung_up(&r);
            }
        }
    });

    consumer.start();

    FunctorThread producer([&r, &elements]() {
        size_t written = 0;
        while (written < totalTestLength) {
            if (!ring_buffer_producer_acquire(&r)) {
                EXPECT_TRUE(
                    r.state == RING_BUFFER_SYNC_CONSUMER_HANGING_UP ||
                    r.state == RING_BUFFER_SYNC_CONSUMER_HUNG_UP);
                ring_buffer_producer_idle(&r);
                ring_buffer_producer_wait_hangup(&r);
                EXPECT_TRUE(ring_buffer_producer_acquire_from_hangup(&r));
            }
            ring_buffer_write_fully(
                &r, nullptr,
                elements.data() + (written % elements.size()), 1);
            ++written;
            ring_buffer_producer_idle(&r);
        }
    });

    producer.start();
    consumer.wait();

    EXPECT_EQ(elements, result);
}

// Measures throughput of the read/write fully operations.
TEST(ring_buffer, SpeedTest) {
    std::default_random_engine generator;
    generator.seed(0);

    // int for toolchain compatibility
    std::uniform_int_distribution<int>
        eltDistribution(0, 255);

    size_t testSize = 1048576 * 8;
    size_t bufSize = 16384;

    std::vector<uint8_t> elements(testSize);

    for (size_t i = 0; i < testSize; ++i) {
        elements[i] = static_cast<uint8_t>(eltDistribution(generator));
    }

    std::vector<uint8_t> result(testSize);

    std::vector<uint8_t> buf(bufSize, 0);

    ring_buffer r;
    ring_buffer_view v;
    ring_buffer_view_init(&r, &v, buf.data(), buf.size());

    size_t totalCycles = 5;

    float mbPerSec = 0.0f;

    for (size_t i = 0; i < totalCycles; ++i) {

        ring_buffer_view_init(&r, &v, buf.data(), buf.size());

        uint64_t start_us = android::base::getHighResTimeUs();

        FunctorThread producer([&r, &v, &elements]() {
            ring_buffer_write_fully(&r, &v, elements.data(), elements.size());
        });

        FunctorThread consumer([&r, &v, &result]() {
            ring_buffer_read_fully(&r, &v, result.data(), result.size());
        });

        producer.start();
        consumer.start();
        consumer.wait();

        uint64_t end_us = android::base::getHighResTimeUs();

        if (i % 10 == 0) {
            fprintf(stderr, "%s: ring stats: live yield sleep %lu %lu %lu\n", __func__,
                (unsigned long)r.read_live_count,
                (unsigned long)r.read_yield_count,
                (unsigned long)r.read_sleep_us_count);
        }
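        // Bytes per microsecond is numerically equal to MB/s
        // (10^6 bytes per 10^6 microseconds).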
        mbPerSec += (float(testSize) / (end_us - start_us));
    }

    mbPerSec = mbPerSec / totalCycles;

    fprintf(stderr, "%s: avg mb per sec: %f\n", __func__, mbPerSec);
}

// Tests copying out the contents available for read
// without incrementing the read index.
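// ring_buffer_copy_contents() copies data out without consuming it, so the
// amount available for read is unchanged until ring_buffer_view_read() is called.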
TEST(ring_buffer, CopyContents) {
    std::vector<uint8_t> elements = {
        0x1, 0x2, 0x3, 0x4,
        0x5, 0x6, 0x7, 0x8,
    };

    std::vector<uint8_t> buf(4, 0);

    std::vector<uint8_t> recv(elements.size(), 0);

    ring_buffer r;
    ring_buffer_view v;
    ring_buffer_view_init(&r, &v, buf.data(), buf.size());

    EXPECT_EQ(true, ring_buffer_view_can_write(&r, &v, 3));
    EXPECT_EQ(0, ring_buffer_available_read(&r, &v));

    uint8_t* elementsPtr = elements.data();
    uint8_t* recvPtr = recv.data();

    EXPECT_EQ(1, ring_buffer_view_write(&r, &v, elementsPtr, 1, 1));
    EXPECT_FALSE(ring_buffer_view_can_write(&r, &v, 3));
    EXPECT_TRUE(ring_buffer_view_can_write(&r, &v, 2));
    EXPECT_EQ(1, ring_buffer_available_read(&r, &v));
    EXPECT_EQ(0, ring_buffer_copy_contents(&r, &v, 1, recvPtr));
    EXPECT_EQ(0x1, *recvPtr);
    EXPECT_EQ(1, ring_buffer_available_read(&r, &v));
    EXPECT_EQ(1, ring_buffer_view_read(&r, &v, recvPtr, 1, 1));
    EXPECT_EQ(0, ring_buffer_available_read(&r, &v));
    EXPECT_TRUE(ring_buffer_view_can_write(&r, &v, 3));

    ++elementsPtr;
    ++recvPtr;

    EXPECT_EQ(1, ring_buffer_view_write(&r, &v, elementsPtr, 3, 1));
    EXPECT_FALSE(ring_buffer_view_can_write(&r, &v, 3));
    EXPECT_EQ(3, ring_buffer_available_read(&r, &v));
    EXPECT_EQ(0, ring_buffer_copy_contents(&r, &v, 3, recvPtr));
    EXPECT_EQ(0x2, recvPtr[0]);
    EXPECT_EQ(0x3, recvPtr[1]);
    EXPECT_EQ(0x4, recvPtr[2]);
    EXPECT_EQ(3, ring_buffer_available_read(&r, &v));
    EXPECT_EQ(1, ring_buffer_view_read(&r, &v, recvPtr, 3, 1));
    EXPECT_EQ(0, ring_buffer_available_read(&r, &v));
    EXPECT_TRUE(ring_buffer_view_can_write(&r, &v, 3));

    elementsPtr += 3;
    recvPtr += 3;

    EXPECT_EQ(1, ring_buffer_view_write(&r, &v, elementsPtr, 3, 1));
    EXPECT_FALSE(ring_buffer_view_can_write(&r, &v, 3));
    EXPECT_EQ(3, ring_buffer_available_read(&r, &v));
    EXPECT_EQ(0, ring_buffer_copy_contents(&r, &v, 3, recvPtr));
    EXPECT_EQ(0x5, recvPtr[0]);
    EXPECT_EQ(0x6, recvPtr[1]);
    EXPECT_EQ(0x7, recvPtr[2]);
    EXPECT_EQ(3, ring_buffer_available_read(&r, &v));
    EXPECT_EQ(1, ring_buffer_view_read(&r, &v, recvPtr, 3, 1));
    EXPECT_EQ(0, ring_buffer_available_read(&r, &v));
    EXPECT_TRUE(ring_buffer_view_can_write(&r, &v, 3));
}

} // namespace base
} // namespace android