// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>                               // for Message
#include <stdint.h>                                    // for uint32_t
#include <stdio.h>                                     // for printf
#include <string.h>                                    // for size_t
#include <sys/types.h>                                 // for ssize_t
#include <algorithm>                                   // for uniform_...
#include <functional>                                  // for __base
#include <random>                                      // for default_...
#include <vector>                                      // for vector

#include "aemu/base/ring_buffer.h"                     // for ring_buf...
#include "aemu/base/threads/FunctorThread.h"           // for FunctorT...
#include "host-common/GraphicsAgentFactory.h"          // for getConso...
#include "host-common/AddressSpaceService.h"           // for AddressS...
#include "host-common/address_space_device.hpp"        // for goldfish...
#include "host-common/address_space_graphics.h"        // for AddressS...
#include "host-common/address_space_graphics_types.h"  // for asg_context
#include "host-common/testing/MockGraphicsAgentFactory.h"
#include "testing/HostAddressSpace.h"                  // for HostAddr...
#include "host-common/globals.h"                       // for android_hw
namespace android {
namespace base {
class Stream;
}  // namespace base
}  // namespace android

using android::base::FunctorThread;

namespace android {
namespace emulation {
namespace asg {

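// Fill patterns used to verify data integrity across the transport:
// the consumer checks that guest writes carry the write pattern, and
// the client checks that host replies carry the read pattern.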
#define ASG_TEST_READ_PATTERN 0xAA
#define ASG_TEST_WRITE_PATTERN 0xBB

class AddressSpaceGraphicsTest : public ::testing::Test {
public:
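    // Guest-side half of the test: emulates what a guest transport would do
    // against the address space device. On construction, it opens the device,
    // pings for the ring and write buffer regions, claims and maps them,
    // builds an asg_context over the mapped storage, and negotiates a
    // protocol version with the host.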
    class Client {
    public:
        Client(HostAddressSpaceDevice* device) :
            mDevice(device),
            mHandle(mDevice->open()) {

            ping((uint64_t)AddressSpaceDeviceType::Graphics);

            auto getRingResult = ping((uint64_t)ASG_GET_RING);
            mRingOffset = getRingResult.metadata;
            mRingSize = getRingResult.size;

            EXPECT_EQ(0, mDevice->claimShared(mHandle, mRingOffset, mRingSize));

            mRingStorage =
                (char*)mDevice->getHostAddr(
                    mDevice->offsetToPhysAddr(mRingOffset));

            auto getBufferResult = ping((uint64_t)ASG_GET_BUFFER);
            mBufferOffset = getBufferResult.metadata;
            mBufferSize = getBufferResult.size;

            EXPECT_EQ(0, mDevice->claimShared(mHandle, mBufferOffset, mBufferSize));
            mBuffer =
                (char*)mDevice->getHostAddr(
                    mDevice->offsetToPhysAddr(mBufferOffset));

            mContext = asg_context_create(mRingStorage, mBuffer, mBufferSize);

            EXPECT_EQ(mBuffer, mContext.buffer);

            auto setVersionResult = ping((uint64_t)ASG_SET_VERSION, mVersion);
            uint32_t hostVersion = setVersionResult.size;
            EXPECT_LE(hostVersion, mVersion);
            EXPECT_EQ(aemu_get_android_hw()->hw_gltransport_asg_writeStepSize,
                      mContext.ring_config->flush_interval);
            EXPECT_EQ(aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize,
                      mBufferSize);

            mContext.ring_config->transfer_mode = 1;
            mContext.ring_config->host_consumed_pos = 0;
            mContext.ring_config->guest_write_pos = 0;
            // Stage guest writes in steps of the negotiated flush interval
            // (equal to hw_gltransport_asg_writeStepSize, as checked above).
            mWriteStep = mContext.ring_config->flush_interval;
            mBufferMask = mBufferSize - 1;

            mWriteStart = mBuffer;
        }

        ~Client() {
            mDevice->unclaimShared(mHandle, mBufferOffset);
            mDevice->unclaimShared(mHandle, mRingOffset);
            mDevice->close(mHandle);
        }

        bool isInError() const {
            return 1 == mContext.ring_config->in_error;
        }

        void abort() {
            mContext.ring_config->in_error = 1;
        }

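        // IOStream-style allocation: stages up to one write step's worth of
        // bytes in the shared buffer and flushes the staged bytes when the
        // next request would overflow the current step. Requests larger than
        // the flush interval are rejected with nullptr.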
        char* allocBuffer(size_t size) {
            if (size > mContext.ring_config->flush_interval) {
                return nullptr;
            }

            if (mWriteStart + mCurrentWriteBytes + size >
                mWriteStart + mWriteStep) {
                flush();
                mCurrentWriteBytes = 0;
            }

            char* res = mWriteStart + mCurrentWriteBytes;
            mCurrentWriteBytes += size;

            return res;
        }

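        // Type 3 (large) transfer: streams the payload through the dedicated
        // large-xfer ring in chunks of at most a quarter of the buffer size,
        // switching transfer_mode to 3 for the duration and back to 1 when
        // done. Returns 0 on success, -1 if the channel entered an error
        // state.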
        int writeFully(const char* buf, size_t size) {
            flush();
            ensureType1Finished();
            mContext.ring_config->transfer_size = size;
            mContext.ring_config->transfer_mode = 3;

            size_t sent = 0;
            size_t quarterRingSize = mBufferSize / 4;
            size_t chunkSize = size < quarterRingSize ? size : quarterRingSize;

            while (sent < size) {
                size_t remaining = size - sent;
                size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

                long sentChunks =
                    ring_buffer_view_write(
                        mContext.to_host_large_xfer.ring,
                        &mContext.to_host_large_xfer.view,
                        buf + sent, sendThisTime, 1);

                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                }

                if (sentChunks == 0) {
                    ring_buffer_yield();
                }

                sent += sentChunks * sendThisTime;

                if (isInError()) {
                    return -1;
                }
            }

            ensureType3Finished();
            mContext.ring_config->transfer_mode = 1;
            return 0;
        }

        ssize_t speculativeRead(char* readBuffer, size_t minSizeToRead) {
            flush();
            ensureConsumerFinishing();

            size_t actuallyRead = 0;
            while (!actuallyRead) {
                uint32_t readAvail =
                    ring_buffer_available_read(
                        mContext.from_host_large_xfer.ring,
                        &mContext.from_host_large_xfer.view);

                if (!readAvail) {
                    ring_buffer_yield();
                    continue;
                }

                uint32_t toRead = readAvail > minSizeToRead ?
                    minSizeToRead : readAvail;

                long stepsRead = ring_buffer_view_read(
                    mContext.from_host_large_xfer.ring,
                    &mContext.from_host_large_xfer.view,
                    readBuffer, toRead, 1);

                actuallyRead += stepsRead * toRead;

                if (isInError()) {
                    return -1;
                }
            }

            return actuallyRead;
        }

        void flush() {
            if (!mCurrentWriteBytes) return;
            type1WriteWithNotify(mWriteStart - mBuffer, mCurrentWriteBytes);
            advanceWrite();
        }

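        // Position arithmetic for the shared write buffer: offsets wrap via
        // mBufferMask, which assumes mBufferSize is a power of two (the
        // default of 524288 bytes set in SetUp satisfies this).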
        uint32_t get_relative_buffer_pos(uint32_t pos) {
            return pos & mBufferMask;
        }

        uint32_t get_available_for_write() {
            uint32_t host_consumed_view;
            __atomic_load(&mContext.ring_config->host_consumed_pos,
                          &host_consumed_view,
                          __ATOMIC_SEQ_CST);
            uint32_t availableForWrite =
                get_relative_buffer_pos(
                    host_consumed_view -
                    mContext.ring_config->guest_write_pos - 1);
            return availableForWrite;
        }

        void advanceWrite() {
            uint32_t avail = get_available_for_write();

            while (avail < mContext.ring_config->flush_interval) {
                ensureConsumerFinishing();
                avail = get_available_for_write();
            }

            __atomic_add_fetch(
                &mContext.ring_config->guest_write_pos,
                mContext.ring_config->flush_interval,
                __ATOMIC_SEQ_CST);

            char* newBuffer =
                mBuffer +
                get_relative_buffer_pos(
                    mContext.ring_config->guest_write_pos);

            mWriteStart = newBuffer;
            mCurrentWriteBytes = 0;
        }

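        // Type 1 transfer: pushes an 8-byte { offset, size } descriptor onto
        // the to_host ring telling the consumer where the staged bytes live
        // in the shared buffer, pinging the host if it is not already
        // consuming.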
        int type1WriteWithNotify(uint32_t bufferOffset, size_t size) {
            size_t sent = 0;
            size_t sizeForRing = 8;

            struct asg_type1_xfer xfer {
                bufferOffset,
                (uint32_t)size,
            };

            uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

            while (sent < sizeForRing) {
                long sentChunks = ring_buffer_write(
                    mContext.to_host, writeBufferBytes + sent, sizeForRing - sent, 1);

                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                }

                if (sentChunks == 0) {
                    ring_buffer_yield();
                }

                sent += sentChunks * (sizeForRing - sent);

                if (isInError()) {
                    return -1;
                }
            }

            return 0;
        }

        void ensureConsumerFinishing() {
            uint32_t currAvailRead =
                ring_buffer_available_read(mContext.to_host, 0);

            while (currAvailRead) {
                ring_buffer_yield();
                uint32_t nextAvailRead = ring_buffer_available_read(mContext.to_host, 0);

                if (nextAvailRead != currAvailRead) {
                    break;
                }

                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                    break;
                }
            }
        }

        void ensureType1Finished() {
            ensureConsumerFinishing();

            uint32_t currAvailRead =
                ring_buffer_available_read(mContext.to_host, 0);

            while (currAvailRead) {
                ring_buffer_yield();
                currAvailRead = ring_buffer_available_read(mContext.to_host, 0);
                if (isInError()) {
                    return;
                }
            }
        }

        void ensureType3Finished() {
            uint32_t availReadLarge =
                ring_buffer_available_read(
                    mContext.to_host_large_xfer.ring,
                    &mContext.to_host_large_xfer.view);
            while (availReadLarge) {
                ring_buffer_yield();
                availReadLarge =
                    ring_buffer_available_read(
                        mContext.to_host_large_xfer.ring,
                        &mContext.to_host_large_xfer.view);
                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                }
                if (isInError()) {
                    return;
                }
            }
        }

        char* getBufferPtr() { return mBuffer; }

    private:

        AddressSpaceDevicePingInfo ping(uint64_t metadata, uint64_t size = 0) {
            AddressSpaceDevicePingInfo info;
            info.metadata = metadata;
            // The size field carries the payload for pings such as
            // ASG_SET_VERSION, which passes the version through it.
            info.size = size;
            mDevice->ping(mHandle, &info);
            return info;
        }

        HostAddressSpaceDevice* mDevice;
        uint32_t mHandle;
        uint64_t mRingOffset;
        uint64_t mRingSize;
        uint64_t mBufferOffset;
        uint64_t mBufferSize;
        char* mRingStorage;
        char* mBuffer;
        struct asg_context mContext;
        uint32_t mVersion = 1;

        char* mWriteStart = 0;
        uint32_t mWriteStep = 0;
        uint32_t mCurrentWriteBytes = 0;
        uint32_t mBufferMask = 0;
    };

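    // Host-side half of the test: a consumer thread that drains the rings
    // the way a real host consumer would, verifying that guest writes carry
    // the write pattern and, when round trips are enabled, echoing back a
    // configurable number of read-pattern bytes.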
    class Consumer {
    public:
        Consumer(struct asg_context context,
                 ConsumerCallbacks callbacks) :
            mContext(context),
            mCallbacks(callbacks),
            mThread([this] { threadFunc(); }) {
            mThread.start();
        }

        ~Consumer() {
            mThread.wait();
        }

        void setRoundTrip(bool enabled,
                          uint32_t toHostBytes = 0,
                          uint32_t fromHostBytes = 0) {
            mRoundTripEnabled = enabled;
            if (mRoundTripEnabled) {
                mToHostBytes = toHostBytes;
                mFromHostBytes = fromHostBytes;
            }
        }

        void handleRoundTrip() {
            if (!mRoundTripEnabled) return;

            if (mReadPos == mToHostBytes) {
                std::vector<char> reply(mFromHostBytes, ASG_TEST_READ_PATTERN);
                uint32_t origBytes = mFromHostBytes;
                auto res = ring_buffer_write_fully_with_abort(
                    mContext.from_host_large_xfer.ring,
                    &mContext.from_host_large_xfer.view,
                    reply.data(),
                    mFromHostBytes,
                    1, &mContext.ring_config->in_error);
                if (res < mFromHostBytes) {
                    printf("%s: aborted write (%u vs %u %u). in error? %u\n", __func__,
                           res, mFromHostBytes, origBytes,
                           mContext.ring_config->in_error);
                    EXPECT_EQ(1, mContext.ring_config->in_error);
                }
                mReadPos = 0;
            }
        }

        void ensureWritebackDone() {
            while (mReadPos) {
                ring_buffer_yield();
            }
        }

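        // One iteration of the consumer loop: dispatches on the current
        // transfer mode (1: descriptor-based xfers through the shared
        // buffer, 2: physical-address xfers, 3: large xfers through the
        // dedicated ring), or falls back to the unavailable-read callback
        // when there is nothing to consume.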
        int step() {
            uint32_t nonLargeAvail =
                ring_buffer_available_read(
                    mContext.to_host, 0);

            uint32_t largeAvail =
                ring_buffer_available_read(
                    mContext.to_host_large_xfer.ring,
                    &mContext.to_host_large_xfer.view);

            ensureReadBuffer(nonLargeAvail);

            int res = 0;
            if (nonLargeAvail) {
                uint32_t transferMode = mContext.ring_config->transfer_mode;

                switch (transferMode) {
                    case 1:
                        type1Read(nonLargeAvail);
                        break;
                    case 2:
                        type2Read(nonLargeAvail);
                        break;
                    case 3:
                        break;
                    default:
                        EXPECT_TRUE(false) << "Failed, invalid transfer mode";
                }

                res = 0;
            } else if (largeAvail) {
                res = type3Read(largeAvail);
            } else {
                res = mCallbacks.onUnavailableRead();
            }

            handleRoundTrip();

            return res;
        }

        void ensureReadBuffer(uint32_t new_xfer) {
            size_t readBufferAvail = mReadBuffer.size() - mReadPos;
            if (readBufferAvail < new_xfer) {
                mReadBuffer.resize(mReadBuffer.size() + 2 * new_xfer);
            }
        }

        void type1Read(uint32_t avail) {
            uint32_t xferTotal = avail / 8;
            for (uint32_t i = 0; i < xferTotal; ++i) {
                struct asg_type1_xfer currentXfer;
                uint8_t* currentXferPtr = (uint8_t*)(&currentXfer);

                EXPECT_EQ(0, ring_buffer_copy_contents(
                    mContext.to_host, 0,
                    sizeof(currentXfer), currentXferPtr));

                char* ptr = mContext.buffer + currentXfer.offset;
                size_t size = currentXfer.size;

                ensureReadBuffer(size);

                memcpy(mReadBuffer.data() + mReadPos,
                       ptr, size);

                for (uint32_t j = 0; j < size; ++j) {
                    EXPECT_EQ((char)ASG_TEST_WRITE_PATTERN,
                              (mReadBuffer.data() + mReadPos)[j]);
                }

                mReadPos += size;
                mContext.ring_config->host_consumed_pos =
                    ptr - mContext.buffer;

                EXPECT_EQ(1, ring_buffer_advance_read(
                    mContext.to_host, sizeof(asg_type1_xfer), 1));
            }
        }

        void type2Read(uint32_t avail) {
            uint32_t xferTotal = avail / 16;
            for (uint32_t i = 0; i < xferTotal; ++i) {
                struct asg_type2_xfer currentXfer;
                uint8_t* xferPtr = (uint8_t*)(&currentXfer);

                EXPECT_EQ(0, ring_buffer_copy_contents(
                    mContext.to_host, 0, sizeof(currentXfer),
                    xferPtr));

                char* ptr = mCallbacks.getPtr(currentXfer.physAddr);
                ensureReadBuffer(currentXfer.size);

                memcpy(mReadBuffer.data() + mReadPos, ptr,
                       currentXfer.size);
                mReadPos += currentXfer.size;

                EXPECT_EQ(1, ring_buffer_advance_read(
                    mContext.to_host, sizeof(currentXfer), 1));
            }
        }

        int type3Read(uint32_t avail) {
            ensureReadBuffer(avail);
            ring_buffer_read_fully_with_abort(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view,
                mReadBuffer.data() + mReadPos,
                avail,
                1, &mContext.ring_config->in_error);
            mReadPos += avail;
            return 0;
        }

    private:

        void threadFunc() {
            while (-1 != step());
        }

        struct asg_context mContext;
        ConsumerCallbacks mCallbacks;
        FunctorThread mThread;
        std::vector<char> mReadBuffer;
        std::vector<char> mWriteBuffer;
        size_t mReadPos = 0;
        uint32_t mToHostBytes = 0;
        uint32_t mFromHostBytes = 0;
        bool mRoundTripEnabled = false;
    };

protected:
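    // Install the mock graphics agents once for the whole suite so the
    // address space device can call into VM operations without a running
    // emulator.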
    static void SetUpTestSuite() {
        android::emulation::injectGraphicsAgents(
            android::emulation::MockGraphicsAgentFactory());
        goldfish_address_space_set_vm_operations(getGraphicsAgents()->vm);
    }

    static void TearDownTestSuite() { }

    void SetUp() override {
        aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize = 524288;
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize = 1024;

        mDevice = HostAddressSpaceDevice::get();
        ConsumerInterface interface = {
            // create
            [this](struct asg_context context,
                   base::Stream* loadStream,
                   ConsumerCallbacks callbacks,
                   uint32_t contextId, uint32_t capsetId,
                   std::optional<std::string> nameOpt) {
                Consumer* c = new Consumer(context, callbacks);
                mCurrentConsumer = c;
                return (void*)c;
            },
            // destroy
            [this](void* context) {
                Consumer* c = reinterpret_cast<Consumer*>(context);
                delete c;
                mCurrentConsumer = nullptr;
            },
            // presave
            [](void* consumer) { },
            // global presave
            []() { },
            // save
            [](void* consumer, base::Stream* stream) { },
            // global postsave
            []() { },
            // postsave
            [](void* consumer) { },
            // postload
            [](void* consumer) { },
            // global preload
            []() { },
        };
        AddressSpaceGraphicsContext::setConsumer(interface);
    }

    void TearDown() override {
        AddressSpaceGraphicsContext::clear();
        mDevice->clear();
        aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize = 524288;
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize = 1024;
        EXPECT_EQ(nullptr, mCurrentConsumer);
    }

    void setRoundTrip(bool enabled, size_t writeBytes, size_t readBytes) {
        EXPECT_NE(nullptr, mCurrentConsumer);
        mCurrentConsumer->setRoundTrip(enabled, writeBytes, readBytes);
    }

    struct RoundTrip {
        size_t writeBytes;
        size_t readBytes;
    };

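    // Drives a sequence of write/read round trips: each trip writes
    // trip.writeBytes of the write pattern in step-sized chunks, then reads
    // back trip.readBytes and checks them against the read pattern.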
    void runRoundTrips(Client& client, const std::vector<RoundTrip>& trips) {
        EXPECT_NE(nullptr, mCurrentConsumer);

        for (const auto& trip : trips) {
            mCurrentConsumer->setRoundTrip(true, trip.writeBytes, trip.readBytes);

            std::vector<char> send(trip.writeBytes, ASG_TEST_WRITE_PATTERN);
            std::vector<char> expectedRead(trip.readBytes, ASG_TEST_READ_PATTERN);
            std::vector<char> toRead(trip.readBytes, 0);

            size_t stepSize = aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;
            size_t stepSizeRead = aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize;

            size_t sent = 0;
            while (sent < trip.writeBytes) {
                size_t remaining = trip.writeBytes - sent;
                size_t next = remaining < stepSize ? remaining : stepSize;
                auto buf = client.allocBuffer(next);
                memcpy(buf, send.data() + sent, next);
                sent += next;
            }

            client.flush();

            size_t recv = 0;

            while (recv < trip.readBytes) {
                ssize_t readThisTime = client.speculativeRead(
                    toRead.data() + recv, stepSizeRead);
                EXPECT_GE(readThisTime, 0);
                recv += readThisTime;
            }

            EXPECT_EQ(expectedRead, toRead);

            // Make sure the consumer has finished its writeback here,
            // or this will race with setRoundTrip.
            mCurrentConsumer->ensureWritebackDone();
        }

        mCurrentConsumer->setRoundTrip(false);
    }

    HostAddressSpaceDevice* mDevice = nullptr;
    Consumer* mCurrentConsumer = nullptr;
};

// Tests that we can create a client for ASG,
// which in turn creates a consumer thread on the "host."
// Then tests the thread teardown.
TEST_F(AddressSpaceGraphicsTest, Basic) {
    Client client(mDevice);
}

// Tests writing via an IOStream-like interface
// (allocBuffer, then flush)
TEST_F(AddressSpaceGraphicsTest, BasicWrite) {
    EXPECT_EQ(1024, aemu_get_android_hw()->hw_gltransport_asg_writeStepSize);
    Client client(mDevice);

    // Tests that going over the step size results in nullptr
    // when using allocBuffer
    auto buf = client.allocBuffer(1025);
    EXPECT_EQ(nullptr, buf);

    buf = client.allocBuffer(4);
    EXPECT_NE(nullptr, buf);
    memset(buf, ASG_TEST_WRITE_PATTERN, 4);
    client.flush();
}

// Tests that further allocs result in flushing
TEST_F(AddressSpaceGraphicsTest, FlushFromAlloc) {
    EXPECT_EQ(1024, aemu_get_android_hw()->hw_gltransport_asg_writeStepSize);
    Client client(mDevice);

    auto buf = client.allocBuffer(1024);
    memset(buf, ASG_TEST_WRITE_PATTERN, 1024);

    for (uint32_t i = 0; i < 10; ++i) {
        buf = client.allocBuffer(1024);
        memset(buf, ASG_TEST_WRITE_PATTERN, 1024);
    }
}

// Tests type 3 (large) transfer by itself
TEST_F(AddressSpaceGraphicsTest, LargeXfer) {
    Client client(mDevice);

    std::vector<char> largeBuf(1048576, ASG_TEST_WRITE_PATTERN);
    client.writeFully(largeBuf.data(), largeBuf.size());
}

// Round trip test
TEST_F(AddressSpaceGraphicsTest, RoundTrip) {
    Client client(mDevice);
    setRoundTrip(true, 1, 1);
    char element = (char)(ASG_TEST_WRITE_PATTERN);
    char reply;

    auto buf = client.allocBuffer(1);
    *buf = element;
    client.flush();
    client.speculativeRead(&reply, 1);
}

// Round trip test (more than one)
TEST_F(AddressSpaceGraphicsTest, RoundTrips) {
    Client client(mDevice);

    std::vector<RoundTrip> trips = {
        { 1, 1, },
        { 2, 2, },
        { 4, 4, },
        { 1026, 34, },
        { 4, 1048576, },
    };

    runRoundTrips(client, trips);
}

// Round trip test (random)
TEST_F(AddressSpaceGraphicsTest, RoundTripsRandom) {
    Client client(mDevice);

    std::default_random_engine generator;
    generator.seed(0);
    std::uniform_int_distribution<int> sizeDist(1, 4097);
    std::vector<RoundTrip> trips;
    for (uint32_t i = 0; i < 1000; ++i) {
        trips.push_back({
            (size_t)sizeDist(generator),
            (size_t)sizeDist(generator),
        });
    }

    runRoundTrips(client, trips);
}

// Abort test: the consumer is set up to write back 1048576
// bytes, but the client aborts without reading them.
TEST_F(AddressSpaceGraphicsTest, Abort) {
    Client client(mDevice);
    setRoundTrip(true, 1, 1048576);

    char send = ASG_TEST_WRITE_PATTERN;
    auto buf = client.allocBuffer(1);
    *buf = send;
    client.flush();
    client.abort();
}

// Test having to create more than one block, and
// ensure traffic works each time.
TEST_F(AddressSpaceGraphicsTest, BlockCreateDestroy) {
    std::vector<Client*> clients;

    std::default_random_engine generator;
    generator.seed(0);
    std::uniform_int_distribution<int> sizeDist(1, 47);
    std::vector<RoundTrip> trips;
    for (uint32_t i = 0; i < 100; ++i) {
        trips.push_back({
            (size_t)sizeDist(generator),
            (size_t)sizeDist(generator),
        });
    }

    int numBlocksMax = 3;
    int numBlocksDetected = 0;
    char* bufLow = (char*)(uintptr_t)(-1);
    char* bufHigh = 0;

    while (true) {
        Client* c = new Client(mDevice);
        runRoundTrips(*c, trips);

        clients.push_back(c);

        char* bufPtr = c->getBufferPtr();
        bufLow = bufPtr < bufLow ? bufPtr : bufLow;
        bufHigh = bufPtr > bufHigh ? bufPtr : bufHigh;

        size_t gap = bufHigh - bufLow;

        numBlocksDetected =
            gap / ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE;

        if (numBlocksDetected > numBlocksMax) break;
    }

    for (auto c : clients) {
        delete c;
    }
}

// Test having to create more than one block, and
// ensure traffic works each time, but also randomly
// delete previous allocs to cause fragmentation.
TEST_F(AddressSpaceGraphicsTest, BlockCreateDestroyRandom) {
    std::vector<Client*> clients;

    std::default_random_engine generator;
    generator.seed(0);

    std::uniform_int_distribution<int> sizeDist(1, 89);
    std::bernoulli_distribution deleteDist(0.2);

    std::vector<RoundTrip> trips;
    for (uint32_t i = 0; i < 100; ++i) {
        trips.push_back({
            (size_t)sizeDist(generator),
            (size_t)sizeDist(generator),
        });
    }

    int numBlocksMax = 3;
    int numBlocksDetected = 0;
    char* bufLow = (char*)(uintptr_t)(-1);
    char* bufHigh = 0;

    while (true) {
        Client* c = new Client(mDevice);
        runRoundTrips(*c, trips);

        clients.push_back(c);

        char* bufPtr = c->getBufferPtr();
        bufLow = bufPtr < bufLow ? bufPtr : bufLow;
        bufHigh = bufPtr > bufHigh ? bufPtr : bufHigh;

        size_t gap = bufHigh - bufLow;

        numBlocksDetected =
            gap / ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE;

        if (numBlocksDetected > numBlocksMax) break;

        if (deleteDist(generator)) {
            // Null out the deleted client so the final cleanup
            // loop does not double-delete it.
            delete c;
            clients[clients.size() - 1] = 0;
        }
    }

    for (auto c : clients) {
        delete c;
    }
}

} // namespace asg
} // namespace emulation
} // namespace android