// Copyright 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "aemu/base/ring_buffer.h"

#include <errno.h>
#include <string.h>
#ifdef _MSC_VER
#include "aemu/base/msvc.h"
#else
#include <sys/time.h>
#endif

#ifdef __x86_64__
#include <emmintrin.h>
#endif

#ifdef _WIN32
#include <windows.h>
#else
#include <sched.h>
#include <unistd.h>
#endif

#define RING_BUFFER_MASK (RING_BUFFER_SIZE - 1)

#define RING_BUFFER_VERSION 1

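// Resets a ring buffer to its initial state: sets host_version to 1 and clears
// the read/write positions, the read-side statistics counters, and the sync state.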
void ring_buffer_init(struct ring_buffer* r) {
    r->host_version = 1;
    r->write_pos = 0;
    r->read_pos = 0;

    r->read_live_count = 0;
    r->read_yield_count = 0;
    r->read_sleep_us_count = 0;

    r->state = 0;
}

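// Maps a free-running position counter onto an offset inside the built-in
// buffer. RING_BUFFER_SIZE must be a power of two for the mask to be valid.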
static uint32_t get_ring_pos(uint32_t index) {
    return index & RING_BUFFER_MASK;
}

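// Returns true if at least 'bytes' bytes can be written without overtaking the
// reader. One slot is always kept free (hence the "- 1") so that a full buffer
// can be distinguished from an empty one.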
bool ring_buffer_can_write(const struct ring_buffer* r, uint32_t bytes) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    return get_ring_pos(read_view - r->write_pos - 1) >= bytes;
}

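// Returns true if at least 'bytes' bytes are available to read.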
bool ring_buffer_can_read(const struct ring_buffer* r, uint32_t bytes) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    return get_ring_pos(write_view - r->read_pos) >= bytes;
}

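// Writes up to 'steps' chunks of 'step_size' bytes from 'data'. Returns the
// number of chunks actually written; if the ring fills up early, errno is set
// to -EAGAIN and the count of completed chunks is returned. A chunk that wraps
// past the end of the buffer is copied in two parts.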
long ring_buffer_write(
    struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps) {
    const uint8_t* data_bytes = (const uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_write(r, step_size)) {
            errno = -EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 writes for the edge case.
        uint32_t available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->write_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                &r->buf[get_ring_pos(r->write_pos)],
                data_bytes + i * step_size,
                available_at_end);
            memcpy(
                &r->buf[get_ring_pos(r->write_pos + available_at_end)],
                data_bytes + i * step_size + available_at_end,
                remaining);
        } else {
            memcpy(
                &r->buf[get_ring_pos(r->write_pos)],
                data_bytes + i * step_size,
                step_size);
        }

        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

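// Reads up to 'steps' chunks of 'step_size' bytes into 'data'. Returns the
// number of chunks actually read, setting errno to -EAGAIN if it stops early.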
long ring_buffer_read(
    struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps) {
    uint8_t* data_bytes = (uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_read(r, step_size)) {
            errno = -EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 reads for the edge case.
        uint32_t available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                data_bytes + i * step_size,
                &r->buf[get_ring_pos(r->read_pos)],
                available_at_end);
            memcpy(
                data_bytes + i * step_size + available_at_end,
                &r->buf[get_ring_pos(r->read_pos + available_at_end)],
                remaining);
        } else {
            memcpy(
                data_bytes + i * step_size,
                &r->buf[get_ring_pos(r->read_pos)],
                step_size);
        }

        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

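// Advances the write position by up to 'steps' chunks of 'step_size' bytes
// without copying any data; same return and errno convention as ring_buffer_write.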
long ring_buffer_advance_write(
    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_write(r, step_size)) {
            errno = -EAGAIN;
            return (long)i;
        }

        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

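// Advances the read position by up to 'steps' chunks of 'step_size' bytes
// without copying any data; same return and errno convention as ring_buffer_read.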
long ring_buffer_advance_read(
    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_read(r, step_size)) {
            errno = -EAGAIN;
            return (long)i;
        }

        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

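// Returns the largest shift such that (1 << shift) <= size, i.e.
// floor(log2(size)) for size >= 1.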
uint32_t ring_buffer_calc_shift(uint32_t size) {
    uint32_t shift = 0;
    while ((1 << shift) < size) {
        ++shift;
    }

    // If size is not a power of 2, round down to the previous power of 2.
    if ((1 << shift) > size) {
        --shift;
    }
    return shift;
}

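// Initializes the control structure 'r' and points the view 'v' at the external
// buffer 'buf'. The usable size is rounded down to a power of two so positions
// can be masked. ring_buffer_init_view_only below sets up only the view.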
void ring_buffer_view_init(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    uint8_t* buf,
    uint32_t size) {

    uint32_t shift = ring_buffer_calc_shift(size);

    ring_buffer_init(r);

    v->buf = buf;
    v->size = (1 << shift);
    v->mask = (1 << shift) - 1;
}

void ring_buffer_init_view_only(
    struct ring_buffer_view* v,
    uint8_t* buf,
    uint32_t size) {

    uint32_t shift = ring_buffer_calc_shift(size);

    v->buf = buf;
    v->size = (1 << shift);
    v->mask = (1 << shift) - 1;
}

uint32_t ring_buffer_view_get_ring_pos(
    const struct ring_buffer_view* v,
    uint32_t index) {
    return index & v->mask;
}

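// View-aware variants of the can_write/can_read checks above; these use the
// view's mask instead of the fixed RING_BUFFER_SIZE.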
bool ring_buffer_view_can_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    return ring_buffer_view_get_ring_pos(
        v, read_view - r->write_pos - 1) >= bytes;
}

bool ring_buffer_view_can_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    return ring_buffer_view_get_ring_pos(
        v, write_view - r->read_pos) >= bytes;
}

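// Returns the number of bytes currently available to read. If 'v' is NULL, the
// ring buffer's built-in storage of RING_BUFFER_SIZE bytes is assumed.
// ring_buffer_available_write below is the symmetric producer-side query.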
uint32_t ring_buffer_available_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    if (v) {
        return ring_buffer_view_get_ring_pos(
            v, write_view - r->read_pos);
    } else {
        return get_ring_pos(write_view - r->read_pos);
    }
}

uint32_t ring_buffer_available_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    if (v) {
        return ring_buffer_view_get_ring_pos(
            v, read_view - r->write_pos - 1);
    } else {
        return get_ring_pos(read_view - r->write_pos - 1);
    }
}

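// Copies 'wanted_bytes' bytes starting at the current read position into 'res'
// without consuming them (the read position is not advanced). Returns 0 on
// success, or -1 if fewer than 'wanted_bytes' bytes are available.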
int ring_buffer_copy_contents(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t wanted_bytes,
    uint8_t* res) {

    uint32_t total_available =
        ring_buffer_available_read(r, v);
    uint32_t available_at_end = 0;

    if (v) {
        available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);
    } else {
        available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);
    }

    if (total_available < wanted_bytes) {
        return -1;
    }

    if (v) {
        if (wanted_bytes > available_at_end) {
            uint32_t remaining = wanted_bytes - available_at_end;
            memcpy(res,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   available_at_end);
            memcpy(res + available_at_end,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
                   remaining);
        } else {
            memcpy(res,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   wanted_bytes);
        }
    } else {
        if (wanted_bytes > available_at_end) {
            uint32_t remaining = wanted_bytes - available_at_end;
            memcpy(res,
                   &r->buf[get_ring_pos(r->read_pos)],
                   available_at_end);
            memcpy(res + available_at_end,
                   &r->buf[get_ring_pos(r->read_pos + available_at_end)],
                   remaining);
        } else {
            memcpy(res,
                   &r->buf[get_ring_pos(r->read_pos)],
                   wanted_bytes);
        }
    }
    return 0;
}

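// View-aware variants of ring_buffer_write/ring_buffer_read: same chunked copy
// and wrap-around handling, but operating on v->buf with v->mask.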
long ring_buffer_view_write(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data, uint32_t step_size, uint32_t steps) {

    const uint8_t* data_bytes = (const uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_view_can_write(r, v, step_size)) {
            errno = -EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 writes for the edge case.
        uint32_t available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->write_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
                data_bytes + i * step_size,
                available_at_end);
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos + available_at_end)],
                data_bytes + i * step_size + available_at_end,
                remaining);
        } else {
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
                data_bytes + i * step_size,
                step_size);
        }

        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

long ring_buffer_view_read(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data, uint32_t step_size, uint32_t steps) {
    uint8_t* data_bytes = (uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_view_can_read(r, v, step_size)) {
            errno = -EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 reads for the edge case.
        uint32_t available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                data_bytes + i * step_size,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                available_at_end);
            memcpy(
                data_bytes + i * step_size + available_at_end,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
                remaining);
        } else {
            memcpy(data_bytes + i * step_size,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   step_size);
        }
        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

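// Small helpers used by the wait loops below: yield the CPU (a pause on
// Windows, sched_yield elsewhere), sleep for ~2 ms, and read a microsecond
// timestamp.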
void ring_buffer_yield() {
#ifdef _WIN32
    _mm_pause();
#else
    sched_yield();
#endif
}

static void ring_buffer_sleep() {
#ifdef _WIN32
    Sleep(2);
#else
    usleep(2000);
#endif
}

static uint64_t ring_buffer_curr_us() {
    uint64_t res;
    struct timeval tv;
    gettimeofday(&tv, NULL);
    res = tv.tv_sec * 1000000ULL + tv.tv_usec;
    return res;
}

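// Backoff schedule for the wait loops below: spin at first, start yielding
// after yield_backoff_us, additionally sleep in ~2 ms slices after
// sleep_backoff_us, and give up once timeout_us has elapsed.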
static const uint32_t yield_backoff_us = 1000;
static const uint32_t sleep_backoff_us = 2000;

bool ring_buffer_wait_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes,
    uint64_t timeout_us) {

    uint64_t start_us = ring_buffer_curr_us();
    uint64_t curr_wait_us;

    bool can_write =
        v ? ring_buffer_view_can_write(r, v, bytes) :
            ring_buffer_can_write(r, bytes);

    while (!can_write) {
#ifdef __x86_64
        _mm_pause();
#endif
        curr_wait_us = ring_buffer_curr_us() - start_us;

        if (curr_wait_us > yield_backoff_us) {
            ring_buffer_yield();
        }

        if (curr_wait_us > sleep_backoff_us) {
            ring_buffer_sleep();
        }

        if (curr_wait_us > timeout_us) {
            return false;
        }

        can_write =
            v ? ring_buffer_view_can_write(r, v, bytes) :
                ring_buffer_can_write(r, bytes);
    }

    return true;
}

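// Same as ring_buffer_wait_write, but for the consumer side; also updates the
// read_yield_count / read_sleep_us_count / read_live_count statistics.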
bool ring_buffer_wait_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes,
    uint64_t timeout_us) {

    uint64_t start_us = ring_buffer_curr_us();
    uint64_t curr_wait_us;

    bool can_read =
        v ? ring_buffer_view_can_read(r, v, bytes) :
            ring_buffer_can_read(r, bytes);

    while (!can_read) {
        // TODO(bohu): find aarch64 equivalent
#ifdef __x86_64
        _mm_pause();
#endif
        curr_wait_us = ring_buffer_curr_us() - start_us;

        if (curr_wait_us > yield_backoff_us) {
            ring_buffer_yield();
            ((struct ring_buffer*)r)->read_yield_count++;
        }

        if (curr_wait_us > sleep_backoff_us) {
            ring_buffer_sleep();
            ((struct ring_buffer*)r)->read_sleep_us_count += 2000;
        }

        if (curr_wait_us > timeout_us) {
            return false;
        }

        can_read =
            v ? ring_buffer_view_can_read(r, v, bytes) :
                ring_buffer_can_read(r, bytes);
    }

    ((struct ring_buffer*)r)->read_live_count++;
    return true;
}

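// Caps a single transfer step to half of the ring capacity (half of v->size
// with a view, RING_BUFFER_SIZE / 2 otherwise), or to 'bytes' if that is smaller.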
static uint32_t get_step_size(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    uint32_t bytes) {

    uint32_t available = v ? (v->size >> 1) : (RING_BUFFER_SIZE >> 1);
    uint32_t res = available < bytes ? available : bytes;

    return res;
}

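// Blocking convenience wrappers around the *_with_abort variants below, with
// no abort condition (abort_ptr == NULL).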
void ring_buffer_write_fully(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data,
    uint32_t bytes) {
    ring_buffer_write_fully_with_abort(r, v, data, bytes, 0, 0);
}

void ring_buffer_read_fully(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data,
    uint32_t bytes) {
    ring_buffer_read_fully_with_abort(r, v, data, bytes, 0, 0);
}

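// Writes exactly 'bytes' bytes, blocking as needed. After each chunk, the value
// pointed to by 'abort_ptr' (if non-NULL) is compared against 'abort_value'; on
// a match the function returns early with the number of bytes written so far.
// ring_buffer_read_fully_with_abort below is the read-side counterpart.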
uint32_t ring_buffer_write_fully_with_abort(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data,
    uint32_t bytes,
    uint32_t abort_value,
    const volatile uint32_t* abort_ptr) {

    uint32_t candidate_step = get_step_size(r, v, bytes);
    uint32_t processed = 0;

    const uint8_t* src = (const uint8_t*)data;

    while (processed < bytes) {
        if (bytes - processed < candidate_step) {
            candidate_step = bytes - processed;
        }

        long processed_here = 0;
        ring_buffer_wait_write(r, v, candidate_step, (uint64_t)(-1));

        if (v) {
            processed_here = ring_buffer_view_write(r, v, src + processed, candidate_step, 1);
        } else {
            processed_here = ring_buffer_write(r, src + processed, candidate_step, 1);
        }

        processed += processed_here ? candidate_step : 0;

        if (abort_ptr && (abort_value == *abort_ptr)) {
            return processed;
        }
    }

    return processed;
}

uint32_t ring_buffer_read_fully_with_abort(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data,
    uint32_t bytes,
    uint32_t abort_value,
    const volatile uint32_t* abort_ptr) {

    uint32_t candidate_step = get_step_size(r, v, bytes);
    uint32_t processed = 0;

    uint8_t* dst = (uint8_t*)data;

    while (processed < bytes) {
#ifdef __x86_64
        _mm_pause();
#endif
        if (bytes - processed < candidate_step) {
            candidate_step = bytes - processed;
        }

        long processed_here = 0;
        ring_buffer_wait_read(r, v, candidate_step, (uint64_t)(-1));

        if (v) {
            processed_here = ring_buffer_view_read(r, v, dst + processed, candidate_step, 1);
        } else {
            processed_here = ring_buffer_read(r, dst + processed, candidate_step, 1);
        }

        processed += processed_here ? candidate_step : 0;

        if (abort_ptr && (abort_value == *abort_ptr)) {
            return processed;
        }
    }

    return processed;
}

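// Producer/consumer handshake, implemented as a small state machine stored in
// r->state. The states used are RING_BUFFER_SYNC_PRODUCER_IDLE/ACTIVE and
// RING_BUFFER_SYNC_CONSUMER_HANGING_UP/HUNG_UP; the transitions below use
// atomic compare-and-swap so only one side wins a given state change.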
void ring_buffer_sync_init(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
}

bool ring_buffer_producer_acquire(struct ring_buffer* r) {
    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
    bool success = __atomic_compare_exchange_n(
        &r->state,
        &expected_idle,
        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
        false /* strong */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
    return success;
}

bool ring_buffer_producer_acquire_from_hangup(struct ring_buffer* r) {
    uint32_t expected_hangup = RING_BUFFER_SYNC_CONSUMER_HUNG_UP;
    bool success = __atomic_compare_exchange_n(
        &r->state,
        &expected_hangup,
        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
        false /* strong */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
    return success;
}

void ring_buffer_producer_wait_hangup(struct ring_buffer* r) {
    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
           RING_BUFFER_SYNC_CONSUMER_HUNG_UP) {
        ring_buffer_yield();
    }
}

void ring_buffer_producer_idle(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
}

bool ring_buffer_consumer_hangup(struct ring_buffer* r) {
    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
    bool success = __atomic_compare_exchange_n(
        &r->state,
        &expected_idle,
        RING_BUFFER_SYNC_CONSUMER_HANGING_UP,
        false /* strong */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
    return success;
}

void ring_buffer_consumer_wait_producer_idle(struct ring_buffer* r) {
    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
           RING_BUFFER_SYNC_PRODUCER_IDLE) {
        ring_buffer_yield();
    }
}

void ring_buffer_consumer_hung_up(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_CONSUMER_HUNG_UP, __ATOMIC_SEQ_CST);
}