/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "AddressSpaceStream.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "VirtGpu.h"
#include "aemu/base/Tracing.h"
#include "util.h"
#include "virtgpu_gfxstream_protocol.h"

#if defined(__ANDROID__)
#include "android-base/properties.h"
#endif
#include <cutils/log.h>

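// Implementation notes (inferred from the code in this file):
// AddressSpaceStream implements IOStream over guest/host shared-memory rings
// ("address space graphics", ASG). Two transfer modes appear below:
//   - type 1 (transfer_mode = 1): commands are written in place into the
//     shared write buffer, and a small asg_type1_xfer record (offset, size)
//     is queued on the to_host ring to describe each flush.
//   - type 3 (transfer_mode = 3): large transfers are streamed in chunks
//     through the to_host_large_xfer ring.
// The host is pinged via m_ops.ping() whenever its state indicates it is
// neither consuming nor rendering.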
static const size_t kReadSize = 512 * 1024;
static const size_t kWriteOffset = kReadSize;

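// The constructor only records state. All shared-memory setup (mapping the
// ring storage and the write buffer) is expected to have been done by the
// caller, which passes in the resulting |context| and shared-memory offsets.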
AddressSpaceStream::AddressSpaceStream(
    address_space_handle_t handle,
    uint32_t version,
    struct asg_context context,
    uint64_t ringOffset,
    uint64_t writeBufferOffset,
    struct address_space_ops ops,
    HealthMonitor<>* healthMonitor) :
    IOStream(context.ring_config->flush_interval),
    m_ops(ops),
    m_tmpBuf(0),
    m_tmpBufSize(0),
    m_tmpBufXferSize(0),
    m_usingTmpBuf(0),
    m_readBuf(0),
    m_read(0),
    m_readLeft(0),
    m_handle(handle),
    m_version(version),
    m_context(context),
    m_ringOffset(ringOffset),
    m_writeBufferOffset(writeBufferOffset),
    m_writeBufferSize(context.ring_config->buffer_size),
    m_writeBufferMask(m_writeBufferSize - 1),
    m_buf((unsigned char*)context.buffer),
    m_writeStart(m_buf),
    m_writeStep(context.ring_config->flush_interval),
    m_notifs(0),
    m_written(0),
    m_backoffIters(0),
    m_backoffFactor(1),
    m_ringStorageSize(sizeof(struct asg_ring_storage) + m_writeBufferSize),
    m_healthMonitor(healthMonitor) {
    // These members are reserved for future use; cast to void so they do
    // not trigger a -Werror for unused private fields.
    (void)m_ringStorageSize;
    (void)m_version;
}

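// Flushes outstanding work, then tears down the shared mappings and closes
// the device handle. The unmap/unclaim calls are skipped when m_mapping is
// set (presumably when a mapping object owns the memory instead).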
AddressSpaceStream::~AddressSpaceStream() {
    flush();
    ensureType3Finished();
    ensureType1Finished();

    if (!m_mapping) {
        m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
        m_ops.unmap(m_context.buffer, m_writeBufferSize);
        m_ops.unclaim_shared(m_handle, m_ringOffset);
        m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
    }

    m_ops.close(m_handle);
    if (m_readBuf) free(m_readBuf);
    if (m_tmpBuf) free(m_tmpBuf);
}

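// Allocations are rounded up to the flush interval (m_writeStep) so that
// small requests always occupy one full write step.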
size_t AddressSpaceStream::idealAllocSize(size_t len) {
    if (len > m_writeStep) return len;
    return m_writeStep;
}

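// Returns a buffer of at least |minSize| bytes. Requests that fit within one
// write step are served directly from the shared write buffer (m_writeStart);
// larger requests fall back to a heap-allocated temporary buffer that is
// later sent as a type 3 (large) transfer.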
void *AddressSpaceStream::allocBuffer(size_t minSize) {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("allocBuffer");
    ensureType3Finished();

    if (!m_readBuf) {
        m_readBuf = (unsigned char*)malloc(kReadSize);
    }

    size_t allocSize =
        (m_writeStep < minSize ? minSize : m_writeStep);

    if (m_writeStep < allocSize) {
        if (!m_tmpBuf) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)malloc(m_tmpBufSize);
        }

        if (m_tmpBufSize < allocSize) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)realloc(m_tmpBuf, m_tmpBufSize);
        }

        if (!m_usingTmpBuf) {
            flush();
        }

        m_usingTmpBuf = true;
        m_tmpBufXferSize = allocSize;
        return m_tmpBuf;
    } else {
        if (m_usingTmpBuf) {
            writeFully(m_tmpBuf, m_tmpBufXferSize);
            m_usingTmpBuf = false;
            m_tmpBufXferSize = 0;
        }

        return m_writeStart;
    }
}

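// Commits |size| bytes previously handed out by allocBuffer: either flushes
// the temporary buffer via writeFully, or queues a type 1 transfer for the
// in-place region and advances the write pointer.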
int AddressSpaceStream::commitBuffer(size_t size)
{
    if (size == 0) return 0;

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, size);
        m_tmpBufXferSize = 0;
        m_usingTmpBuf = false;
        return 0;
    } else {
        int res = type1Write(m_writeStart - m_buf, size);
        advanceWrite();
        return res;
    }
}

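// Blocks until exactly |totalReadSize| bytes have been copied into |ptr|,
// consuming any previously buffered read data first. Returns NULL on end of
// pipe or read error; aborts if |ptr| is NULL while bytes are requested.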
const unsigned char *AddressSpaceStream::readFully(void *ptr, size_t totalReadSize)
{
    unsigned char* userReadBuf = static_cast<unsigned char*>(ptr);

    if (!userReadBuf) {
        if (totalReadSize > 0) {
            ALOGE("AddressSpaceStream::readFully failed, userReadBuf=NULL, "
                  "totalReadSize %zu, lethal error, exiting.", totalReadSize);
            abort();
        }
        return nullptr;
    }

    // Drain the buffered read first, if any of it remains unconsumed.
    size_t remaining = totalReadSize;
    size_t bufferedReadSize =
        m_readLeft < remaining ? m_readLeft : remaining;

    if (bufferedReadSize) {
        memcpy(userReadBuf,
               m_readBuf + (m_read - m_readLeft),
               bufferedReadSize);
        remaining -= bufferedReadSize;
        m_readLeft -= bufferedReadSize;
    }

    if (!remaining) return userReadBuf;

    // Read up to kReadSize bytes once the buffered read has been consumed.
    size_t maxRead = m_readLeft ? 0 : kReadSize;
    ssize_t actual = 0;

    if (maxRead) {
        actual = speculativeRead(m_readBuf, maxRead);

        // Update the buffered read size.
        if (actual > 0) {
            m_read = m_readLeft = actual;
        }

        if (actual == 0) {
            ALOGD("%s: end of pipe", __FUNCTION__);
            return NULL;
        }

        if (actual < 0) {
            ALOGE("%s: error reading from pipe", __FUNCTION__);
            return NULL;
        }
    }

    // Consume the buffered read and read more if necessary.
    while (remaining) {
        bufferedReadSize = m_readLeft < remaining ? m_readLeft : remaining;
        if (bufferedReadSize) {
            memcpy(userReadBuf + (totalReadSize - remaining),
                   m_readBuf + (m_read - m_readLeft),
                   bufferedReadSize);
            remaining -= bufferedReadSize;
            m_readLeft -= bufferedReadSize;
            continue;
        }

        actual = speculativeRead(m_readBuf, kReadSize);

        if (actual == 0) {
            ALOGD("%s: end of pipe", __FUNCTION__);
            return NULL;
        }

        if (actual < 0) {
            ALOGE("%s: error reading from pipe", __FUNCTION__);
            return NULL;
        }

        m_read = m_readLeft = actual;
    }

    resetBackoff();
    return userReadBuf;
}

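// Single speculative read: returns up to |*inout_len| bytes and stores the
// actual count back into |*inout_len|. Returns nullptr on error.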
const unsigned char *AddressSpaceStream::read(void *buf, size_t *inout_len) {
    unsigned char* dst = (unsigned char*)buf;
    size_t wanted = *inout_len;
    ssize_t actual = speculativeRead(dst, wanted);

    if (actual >= 0) {
        *inout_len = actual;
    } else {
        return nullptr;
    }

    return (const unsigned char*)dst;
}

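// Synchronous type 3 (large) transfer: streams |buf| in chunks of up to a
// quarter of the write buffer through the large-transfer ring, pings the
// host if it looks idle, and waits for the host to drain the ring before
// switching back to transfer mode 1.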
int AddressSpaceStream::writeFully(const void *buf, size_t size)
{
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("writeFully");
    ensureType3Finished();
    ensureType1Finished();

    m_context.ring_config->transfer_size = size;
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 4;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool hostPinged = false;
    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        if (!hostPinged && *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    ensureType3Finished();

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

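// Same as writeFully, but uses half-buffer chunks and does not wait for the
// host to finish draining the large-transfer ring before returning.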
int AddressSpaceStream::writeFullyAsync(const void *buf, size_t size)
{
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("writeFullyAsync");
    ensureType3Finished();
    ensureType1Finished();

    __atomic_store_n(&m_context.ring_config->transfer_size, size, __ATOMIC_RELEASE);
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 2;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool pingedHost = false;

    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        uint32_t hostState = __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

        if (!pingedHost &&
            hostState != ASG_HOST_STATE_CAN_CONSUME &&
            hostState != ASG_HOST_STATE_RENDERING) {
            pingedHost = true;
            notifyAvailable();
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

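// Combined commit + blocking read for round-trip traffic: commits
// |writeSize| bytes (from the temporary or in-place buffer) and then reads
// exactly |totalReadSize| bytes of reply into |userReadBufPtr|.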
const unsigned char *AddressSpaceStream::commitBufferAndReadFully(
    size_t writeSize, void *userReadBufPtr, size_t totalReadSize) {

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, writeSize);
        m_usingTmpBuf = false;
        m_tmpBufXferSize = 0;
        return readFully(userReadBufPtr, totalReadSize);
    } else {
        commitBuffer(writeSize);
        return readFully(userReadBufPtr, totalReadSize);
    }
}

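// True if the ring has been flagged as in error.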
bool AddressSpaceStream::isInError() const {
    return 1 == m_context.ring_config->in_error;
}

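// Spins (with backoff) until some data is available on the from-host
// large-transfer ring, then reads up to |trySize| bytes. Returns the number
// of bytes read, or -1 if the ring is in error.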
ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
    ensureType3Finished();
    ensureType1Finished();

    size_t actuallyRead = 0;

    while (!actuallyRead) {
        uint32_t readAvail =
            ring_buffer_available_read(
                m_context.from_host_large_xfer.ring,
                &m_context.from_host_large_xfer.view);

        if (!readAvail) {
            ring_buffer_yield();
            backoff();
            continue;
        }

        uint32_t toRead = readAvail > trySize ? trySize : readAvail;

        long stepsRead = ring_buffer_view_read(
            m_context.from_host_large_xfer.ring,
            &m_context.from_host_large_xfer.view,
            readBuffer, toRead, 1);

        actuallyRead += stepsRead * toRead;

        if (isInError()) {
            return -1;
        }
    }

    return actuallyRead;
}

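// Pings the host through the device to signal that new data is available
// on the rings.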
void AddressSpaceStream::notifyAvailable() {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("PING");
    struct address_space_ping request;
    request.metadata = ASG_NOTIFY_AVAILABLE;
    request.resourceId = m_resourceId;
    m_ops.ping(m_handle, &request);
    ++m_notifs;
}

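// Masks |pos| into the write buffer; relies on m_writeBufferSize being a
// power of two.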
uint32_t AddressSpaceStream::getRelativeBufferPos(uint32_t pos) {
    return pos & m_writeBufferMask;
}

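// Advances the in-place write pointer by one flush interval, wrapping back
// to the start of the write buffer at the end.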
void AddressSpaceStream::advanceWrite() {
    m_writeStart += m_context.ring_config->flush_interval;

    if (m_writeStart == m_buf + m_context.ring_config->buffer_size) {
        m_writeStart = m_buf;
    }
}

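// Waits for progress on the to_host ring while it still holds unread data:
// breaks as soon as the host consumes anything, or after pinging a host
// that appears idle.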
void AddressSpaceStream::ensureConsumerFinishing() {
    uint32_t currAvailRead = ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        ring_buffer_yield();
        uint32_t nextAvailRead = ring_buffer_available_read(m_context.to_host, 0);

        if (nextAvailRead != currAvailRead) {
            break;
        }

        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            break;
        }

        backoff();
    }
}

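// Blocks until the host has consumed everything on the to_host (type 1)
// ring, or the stream enters an error state.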
void AddressSpaceStream::ensureType1Finished() {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("ensureType1Finished");

    uint32_t currAvailRead =
        ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        backoff();
        ring_buffer_yield();
        currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
        if (isInError()) {
            return;
        }
    }
}

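// Blocks until the host has drained the to_host large-transfer (type 3)
// ring, re-pinging the host whenever it looks idle, or until an error
// occurs.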
void AddressSpaceStream::ensureType3Finished() {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("ensureType3Finished");
    uint32_t availReadLarge =
        ring_buffer_available_read(
            m_context.to_host_large_xfer.ring,
            &m_context.to_host_large_xfer.view);
    while (availReadLarge) {
        ring_buffer_yield();
        backoff();
        availReadLarge =
            ring_buffer_available_read(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view);
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
        }
        if (isInError()) {
            return;
        }
    }
}

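// Queues a type 1 transfer record (offset + size into the shared write
// buffer) on the to_host ring. Before writing, it waits until fewer than
// maxOutstanding records are pending, apparently so that in-place command
// data is not overwritten before the host has read it.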
int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("type1Write");

    ensureType3Finished();

    size_t sent = 0;
    size_t sizeForRing = sizeof(struct asg_type1_xfer);

    struct asg_type1_xfer xfer = {
        bufferOffset,
        (uint32_t)size,
    };

    uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

    uint32_t maxOutstanding = 1;
    uint32_t maxSteps = m_context.ring_config->buffer_size /
            m_context.ring_config->flush_interval;

    if (maxSteps > 1) maxOutstanding = maxSteps - 1;

    uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);

    while (ringAvailReadNow >= maxOutstanding * sizeForRing) {
        ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
    }

    bool hostPinged = false;
    while (sent < sizeForRing) {
        long sentChunks = ring_buffer_write(
            m_context.to_host,
            writeBufferBytes + sent,
            sizeForRing - sent, 1);

        if (!hostPinged &&
            *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * (sizeForRing - sent);

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }

    resetBackoff();
    return 0;
}

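// Backoff for the spin loops above: after a threshold number of iterations
// it starts sleeping, doubling the sleep duration (capped at 1000 us) for
// every further increment of iterations past the threshold.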
void AddressSpaceStream::backoff() {
#if defined(__ANDROID__)
    // Check __ANDROID__ before __linux__ (bionic defines both), so that the
    // sysprop-tunable values are actually used on Android.
    static const uint32_t kBackoffItersThreshold =
        android::base::GetUintProperty("ro.boot.asg.backoffiters", 50000000);
    static const uint32_t kBackoffFactorDoublingIncrement =
        android::base::GetUintProperty("ro.boot.asg.backoffincrement", 50000000);
#elif defined(__APPLE__) || defined(__MACOSX) || defined(__Fuchsia__) || defined(__linux__)
    static const uint32_t kBackoffItersThreshold = 50000000;
    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
#else
    // Conservative fallback for any other platform, matching the values above.
    static const uint32_t kBackoffItersThreshold = 50000000;
    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
#endif
    ++m_backoffIters;

    if (m_backoffIters > kBackoffItersThreshold) {
        usleep(m_backoffFactor);
        uint32_t itersSoFarAfterThreshold = m_backoffIters - kBackoffItersThreshold;
        if (itersSoFarAfterThreshold > kBackoffFactorDoublingIncrement) {
            m_backoffFactor = m_backoffFactor << 1;
            if (m_backoffFactor > 1000) m_backoffFactor = 1000;
            m_backoffIters = kBackoffItersThreshold;
        }
    }
}

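// Called after successful progress to re-arm the spin/backoff counters.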
void AddressSpaceStream::resetBackoff() {
    m_backoffIters = 0;
    m_backoffFactor = 1;
}