A modern Music Player Daemon based on the Rockbox open-source, high-quality audio player
libadwaita
audio
rust
zig
deno
mpris
rockbox
mpd
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2007 Nicolas Pennequin
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#include "config.h"
22#include <string.h>
23#include "system.h"
24#include "storage.h"
25#include "thread.h"
26#include "kernel.h"
27#include "panic.h"
28#include "debug.h"
29#include "file.h"
30#include "appevents.h"
31#include "metadata.h"
32#include "bmp.h"
33#ifdef HAVE_ALBUMART
34#include "albumart.h"
35#include "jpeg_load.h"
36#include "playback.h"
37#endif
38#include "buffering.h"
39#include "linked_list.h"
40
41/* Define LOGF_ENABLE to enable logf output in this file */
42/* #define LOGF_ENABLE */
43#include "logf.h"
44
45#define BUF_MAX_HANDLES 384
46
47/* macros to enable logf for queues
48 logging on SYS_TIMEOUT can be disabled */
49#ifdef SIMULATOR
50/* Define this for logf output of all queuing except SYS_TIMEOUT */
51#define BUFFERING_LOGQUEUES
52/* Define this to logf SYS_TIMEOUT messages */
53/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
54#endif
55
56#ifdef BUFFERING_LOGQUEUES
57#define LOGFQUEUE logf
58#else
59#define LOGFQUEUE(...)
60#endif
61
62#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
63#define LOGFQUEUE_SYS_TIMEOUT logf
64#else
65#define LOGFQUEUE_SYS_TIMEOUT(...)
66#endif
67
68#define GUARD_BUFSIZE (32*1024)
69
70/* amount of data to read in one read() call */
71#define BUFFERING_DEFAULT_FILECHUNK (1024*32)
72
/* Per-handle property flags, stored in memory_handle.flags */
enum handle_flags
{
    H_CANWRAP = 0x1, /* Handle data may wrap in buffer */
    H_ALLOCALL = 0x2, /* All data must be allocated up front */
    H_FIXEDDATA = 0x4, /* Data is fixed in position */
};
79
/* Bookkeeping record for one buffered item. The struct itself lives
 * inside the ring buffer, immediately followed by the (optional) path
 * string and then the handle's data area. */
struct memory_handle {
    struct lld_node hnode; /* Handle list node (first!) */
    struct lld_node mrunode;/* MRU list node (second!) */
    size_t size; /* Size of this structure + its auxiliary data */
    int id; /* A unique ID for the handle */
    enum data_type type; /* Type of data buffered with this handle */
    uint8_t flags; /* Handle property flags (enum handle_flags) */
    int8_t pinned; /* Count of pinnings; nonzero forbids moving data */
    int8_t signaled; /* Stop any attempt at waiting to get the data */
    int fd; /* File descriptor to path (-1 if closed) */
    size_t data; /* Start index of the handle's data buffer */
    size_t ridx; /* Read pointer, relative to the main buffer */
    size_t widx; /* Write pointer, relative to the main buffer */
    off_t filesize; /* File total length (possibly trimmed at tail) */
    off_t start; /* Offset at which we started reading the file */
    off_t pos; /* Read position in file */
    off_t volatile end; /* Offset at which we stopped reading the file
                           (volatile: advanced by the buffering thread
                           while other threads poll it) */
    char path[]; /* Path if data originated in a file (flexible array) */
};
99
100/* Minimum allowed handle movement */
101#define MIN_MOVE_DELTA sizeof(struct memory_handle)
102
103struct buf_message_data
104{
105 int handle_id;
106 intptr_t data;
107};
108
109static char *buffer;
110static char *guard_buffer;
111
112static size_t buffer_len;
113
114/* Configuration */
115static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
116static size_t high_watermark = 0; /* High watermark for rebuffer */
117
118static struct lld_head handle_list; /* buffer-order handle list */
119static struct lld_head mru_cache; /* MRU-ordered list of handles */
120static int num_handles; /* number of handles in the lists */
121static int base_handle_id;
122
123/* Main lock for adding / removing handles */
124static struct mutex llist_mutex SHAREDBSS_ATTR;
125
126#define HLIST_HANDLE(node) \
127 ({ struct lld_node *__node = (node); \
128 (struct memory_handle *)__node; })
129
130#define HLIST_FIRST \
131 HLIST_HANDLE(handle_list.head)
132
133#define HLIST_LAST \
134 HLIST_HANDLE(handle_list.tail)
135
136#define HLIST_PREV(h) \
137 HLIST_HANDLE((h)->hnode.prev)
138
139#define HLIST_NEXT(h) \
140 HLIST_HANDLE((h)->hnode.next)
141
142#define MRU_HANDLE(m) \
143 container_of((m), struct memory_handle, mrunode)
144
145static struct data_counters
146{
147 size_t remaining; /* Amount of data needing to be buffered */
148 size_t buffered; /* Amount of data currently in the buffer */
149 size_t useful; /* Amount of data still useful to the user */
150} data_counters;
151
152
153/* Messages available to communicate with the buffering thread */
154enum
155{
156 Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
157 used in a low buffer situation. */
158 Q_REBUFFER_HANDLE, /* Request reset and rebuffering of a handle at a new
159 file starting position. */
160 Q_CLOSE_HANDLE, /* Request closing a handle */
161
162 /* Configuration: */
163 Q_START_FILL, /* Request that the buffering thread initiate a buffer
164 fill at its earliest convenience */
165 Q_HANDLE_ADDED, /* Inform the buffering thread that a handle was added,
166 (which means the disk is spinning) */
167};
168
169/* Buffering thread */
170static void buffering_thread(void);
171static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
172static const char buffering_thread_name[] = "buffering";
173static unsigned int buffering_thread_id = 0;
174static struct event_queue buffering_queue SHAREDBSS_ATTR;
175static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR;
176
/* Close *fd_p if it refers to an open descriptor and mark it closed
 * by storing -1 back through the pointer. A no-op for fd < 0. */
static void close_fd(int *fd_p)
{
    if (*fd_p >= 0) {
        close(*fd_p);
        *fd_p = -1;
    }
}
185
186/* Ring buffer helper functions */
187static inline void * ringbuf_ptr(uintptr_t p)
188{
189 return buffer + p;
190}
191
192static inline uintptr_t ringbuf_offset(const void *ptr)
193{
194 return (uintptr_t)(ptr - (void *)buffer);
195}
196
197/* Buffer pointer (p) plus value (v), wrapped if necessary */
198static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
199{
200 uintptr_t res = p + v;
201 if (res >= buffer_len)
202 res -= buffer_len; /* wrap if necssary */
203 return res;
204}
205
206/* Buffer pointer (p) minus value (v), wrapped if necessary */
207/* Interprets p == v as empty */
208static inline uintptr_t ringbuf_sub_empty(uintptr_t p, size_t v)
209{
210 uintptr_t res = p;
211 if (p < v)
212 res += buffer_len; /* wrap */
213
214 return res - v;
215}
216
217/* Buffer pointer (p) minus value (v), wrapped if necessary */
218/* Interprets p == v as full */
219static inline uintptr_t ringbuf_sub_full(uintptr_t p, size_t v)
220{
221 uintptr_t res = p;
222 if (p <= v)
223 res += buffer_len; /* wrap */
224
225 return res - v;
226}
227
228/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
229/* Interprets p1 == p2 as empty */
230static inline ssize_t ringbuf_add_cross_empty(uintptr_t p1, size_t v,
231 uintptr_t p2)
232{
233 ssize_t res = p1 + v - p2;
234 if (p1 >= p2) /* wrap if necessary */
235 res -= buffer_len;
236
237 return res;
238}
239
240/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
241/* Interprets p1 == p2 as full */
242static inline ssize_t ringbuf_add_cross_full(uintptr_t p1, size_t v,
243 uintptr_t p2)
244{
245 ssize_t res = p1 + v - p2;
246 if (p1 > p2) /* wrap if necessary */
247 res -= buffer_len;
248
249 return res;
250}
251
252/* Real buffer watermark */
253#define BUF_WATERMARK MIN(conf_watermark, high_watermark)
254
255static size_t bytes_used(void)
256{
257 struct memory_handle *first = HLIST_FIRST;
258 if (!first) {
259 return 0;
260 }
261
262 return ringbuf_sub_full(HLIST_LAST->widx, ringbuf_offset(first));
263}
264
265/*
266LINKED LIST MANAGEMENT
267======================
268
269add_handle : Create a new handle
270link_handle : Add a handle to the list
271unlink_handle : Remove a handle from the list
272find_handle : Get a handle pointer from an ID
273move_handle : Move a handle in the buffer (with or without its data)
274
275These functions only handle the linked list structure. They don't touch the
276contents of the struct memory_handle headers.
277
278Doubly-linked list, not circular.
279New handles are added at the tail.
280
281num_handles = N
282 NULL <- h0 <-> h1 <-> h2 -> ... <- hN-1 -> NULL
283head=> --------^ ^
284tail=> -----------------------------------+
285
286MRU cache is similar except new handles are added at the head and the most-
287recently-accessed handle is always moved to the head (if not already there).
288
289*/
290
/* Hand out handle IDs in sequence: 1, 2, ... INT_MAX, then restart at
 * 1. The internal counter resets before reaching INT_MAX so the
 * increment can never overflow (which would be UB for signed int). */
static int next_handle_id(void)
{
    static int cur_handle_id = 0;

    int id = cur_handle_id + 1;
    cur_handle_id = (id == INT_MAX) ? 0 : id;
    return id;
}
303
304/* Adds the handle to the linked list */
305static void link_handle(struct memory_handle *h)
306{
307 lld_insert_last(&handle_list, &h->hnode);
308 lld_insert_first(&mru_cache, &h->mrunode);
309 num_handles++;
310}
311
312/* Delete a given memory handle from the linked list */
313static void unlink_handle(struct memory_handle *h)
314{
315 lld_remove(&handle_list, &h->hnode);
316 lld_remove(&mru_cache, &h->mrunode);
317 num_handles--;
318}
319
320/* Adjusts handle list pointers _before_ it's actually moved */
321static void adjust_handle_node(struct lld_head *list,
322 struct lld_node *srcnode,
323 struct lld_node *destnode)
324{
325 if (srcnode->prev) {
326 srcnode->prev->next = destnode;
327 } else {
328 list->head = destnode;
329 }
330
331 if (srcnode->next) {
332 srcnode->next->prev = destnode;
333 } else {
334 list->tail = destnode;
335 }
336}
337
338/* Add a new handle to the linked list and return it. It will have become the
339 new current handle.
340 flags contains information on how this may be allocated
341 data_size must contain the size of what will be in the handle.
342 widx_out points to variable to receive first available byte of data area
343 returns a valid memory handle if all conditions for allocation are met.
344 NULL if there memory_handle itself cannot be allocated or if the
345 data_size cannot be allocated and alloc_all is set. */
static struct memory_handle *
add_handle(unsigned int flags, size_t data_size, const char *path,
           size_t *data_out)
{
    /* Gives each handle a unique id */
    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    /* ridx = offset of the oldest live data (first handle),
       widx = candidate write position for the new handle */
    size_t ridx = 0, widx = 0;
    off_t cur_total = 0;

    struct memory_handle *first = HLIST_FIRST;
    if (first) {
        /* Buffer is not empty */
        struct memory_handle *last = HLIST_LAST;
        ridx = ringbuf_offset(first);
        widx = last->data;
        /* Total bytes the last handle will eventually occupy */
        cur_total = last->filesize - last->start;
    }

    if (cur_total > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        if (ringbuf_add_cross_full(widx, cur_total, ridx) > 0) {
            /* Not enough space to finish allocation */
            return NULL;
        } else {
            /* Apply all the needed reserve */
            widx = ringbuf_add(widx, cur_total);
        }
    }

    /* Align to align size up */
    size_t pathsize = path ? strlen(path) + 1 : 0;
    size_t adjust = ALIGN_UP(widx, alignof(struct memory_handle)) - widx;
    size_t index = ringbuf_add(widx, adjust);
    /* Struct + trailing path string, padded so the data area that
       follows stays aligned for the next handle */
    size_t handlesize = ALIGN_UP(sizeof(struct memory_handle) + pathsize,
                                 alignof(struct memory_handle));
    size_t len = handlesize + data_size;

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if (index + handlesize > buffer_len ||
        (!(flags & H_CANWRAP) && index + len > buffer_len)) {
        index = 0;
    }

    /* How far we shifted index to align things, must be < buffer_len */
    size_t shift = ringbuf_sub_empty(index, widx);

    /* How much space are we short in the actual ring buffer? */
    ssize_t overlap = first ?
        ringbuf_add_cross_full(widx, shift + len, ridx) :
        ringbuf_add_cross_empty(widx, shift + len, ridx);

    if (overlap > 0 &&
        ((flags & H_ALLOCALL) || (size_t)overlap > data_size)) {
        /* Not enough space for required allocations */
        return NULL;
    }

    /* There is enough space for the required data, initialize the struct */
    struct memory_handle *h = ringbuf_ptr(index);

    h->size = handlesize;
    h->id = next_handle_id();
    h->flags = flags;
    h->pinned = 0; /* Can be moved */
    h->signaled = 0; /* Data can be waited for */
    /* NOTE: type, fd, data/ridx/widx, filesize, start, pos and end are
       left for the caller to fill in (see bufopen/bufalloc); the
       handle is not linked into the lists here either */

    /* Save the provided path */
    if (path)
        memcpy(h->path, path, pathsize);

    /* Return the start of the data area */
    *data_out = ringbuf_add(index, handlesize);

    return h;
}
427
428/* Return a pointer to the memory handle of given ID.
429 NULL if the handle wasn't found */
430static struct memory_handle * find_handle(int handle_id)
431{
432 struct memory_handle *h = NULL;
433 struct lld_node *mru = mru_cache.head;
434 struct lld_node *m = mru;
435
436 while (m && MRU_HANDLE(m)->id != handle_id) {
437 m = m->next;
438 }
439
440 if (m) {
441 if (m != mru) {
442 lld_remove(&mru_cache, m);
443 lld_insert_first(&mru_cache, m);
444 }
445
446 h = MRU_HANDLE(m);
447 }
448
449 return h;
450}
451
452/* Move a memory handle and data_size of its data delta bytes along the buffer.
453 delta maximum bytes available to move the handle. If the move is performed
454 it is set to the actual distance moved.
455 data_size is the amount of data to move along with the struct.
456 returns true if the move is successful and false if the handle is NULL,
457 the move would be less than the size of a memory_handle after
458 correcting for wraps or if the handle is not found in the linked
459 list for adjustment. This function has no side effects if false
460 is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size)
{
    struct memory_handle *src;

    if (h == NULL || (src = *h) == NULL)
        return false;

    /* Bytes to relocate: the struct (incl. path) plus requested data */
    size_t size_to_move = src->size + data_size;

    /* Align to align size down */
    size_t final_delta = *delta;
    final_delta = ALIGN_DOWN(final_delta, alignof(struct memory_handle));
    if (final_delta < MIN_MOVE_DELTA) {
        /* It's not legal to move less than MIN_MOVE_DELTA */
        return false;
    }

    uintptr_t oldpos = ringbuf_offset(src);
    uintptr_t newpos = ringbuf_add(oldpos, final_delta);
    /* overlap     > 0: bytes of the moved block that wrap past the
                        end of the buffer at the destination
       overlap_old > 0: bytes that already wrap at the source */
    intptr_t overlap = ringbuf_add_cross_full(newpos, size_to_move, buffer_len);
    intptr_t overlap_old = ringbuf_add_cross_full(oldpos, size_to_move, buffer_len);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        ssize_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!(src->flags & H_CANWRAP)) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta if the struct size is
             * aligned and the buffer is aligned */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to align size up */
            correction = ALIGN_UP(correction, alignof(struct memory_handle));
            if (final_delta < correction + MIN_MOVE_DELTA) {
                /* Delta cannot end up less than MIN_MOVE_DELTA */
                return false;
            }
            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    struct memory_handle *dest = ringbuf_ptr(newpos);

    /* Adjust list pointers */
    adjust_handle_node(&handle_list, &src->hnode, &dest->hnode);
    adjust_handle_node(&mru_cache, &src->mrunode, &dest->mrunode);

    /* x = handle(s) following this one...
     * ...if last handle, unmoveable if metadata, only shrinkable if audio.
     * In other words, no legal move can be made that would have the src head
     * and dest tail of the data overlap itself. These facts reduce the
     * problem to four essential permutations.
     *
     * movement: always "clockwise" >>>>
     *
     * (src nowrap, dest nowrap)
     * |0123  x   |
     * |  0123x   | etc...
     * move: "0123"
     *
     * (src nowrap, dest wrap)
     * |  x0123   |
     * |23x    01|
     * move: "23", "01"
     *
     * (src wrap, dest nowrap)
     * |23   x01|
     * | 0123x  |
     * move: "23", "01"
     *
     * (src wrap, dest wrap)
     * |23   x  01|
     * |123x    0|
     * move: "23", "1", "0"
     */
    if (overlap_old > 0) {
        /* Move over already wrapped data by the final delta */
        memmove(ringbuf_ptr(final_delta), ringbuf_ptr(0), overlap_old);
        if (overlap <= 0)
            size_to_move -= overlap_old;
    }

    if (overlap > 0) {
        /* Move data that now wraps to the beginning */
        size_to_move -= overlap;
        memmove(ringbuf_ptr(0), SKIPBYTES(src, size_to_move),
                overlap_old > 0 ? final_delta : (size_t)overlap);
    }

    /* Move leading fragment containing handle struct */
    memmove(dest, src, size_to_move);

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    return true;
}
568
569
570/*
571BUFFER SPACE MANAGEMENT
572=======================
573
574update_data_counters: Updates the values in data_counters
575buffer_handle : Buffer data for a handle
576rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
577shrink_handle : Free buffer space by moving a handle
578fill_buffer : Call buffer_handle for all handles that have data to buffer
579
580These functions are used by the buffering thread to manage buffer space.
581*/
582
583static int update_data_counters(struct data_counters *dc)
584{
585 size_t buffered = 0;
586 size_t remaining = 0;
587 size_t useful = 0;
588
589 if (dc == NULL)
590 dc = &data_counters;
591
592 mutex_lock(&llist_mutex);
593
594 int num = num_handles;
595 struct memory_handle *m = find_handle(base_handle_id);
596 bool is_useful = m == NULL;
597
598 for (m = HLIST_FIRST; m; m = HLIST_NEXT(m))
599 {
600 off_t pos = m->pos;
601 off_t end = m->end;
602
603 buffered += end - m->start;
604 remaining += m->filesize - end;
605
606 if (m->id == base_handle_id)
607 is_useful = true;
608
609 if (is_useful)
610 useful += end - pos;
611 }
612
613 mutex_unlock(&llist_mutex);
614
615 dc->buffered = buffered;
616 dc->remaining = remaining;
617 dc->useful = useful;
618
619 return num;
620}
621
622/* Q_BUFFER_HANDLE event and buffer data for the given handle.
623 Return whether or not the buffering should continue explicitly. */
static bool buffer_handle(int handle_id, size_t to_buffer)
{
    logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer);
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return true;

    logf(" type: %d", (int)h->type);

    if (h->end >= h->filesize) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) { /* file closed, reopen */
        if (h->path[0] != '\0')
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0) {
            /* could not open the file, truncate it where it is */
            h->filesize = h->end;
            return true;
        }

        if (h->start)
            lseek(h->fd, h->start, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3) {
        /* Metadata handles are filled in one shot by the metadata
           parser rather than by raw file reads */
        get_metadata_ex(ringbuf_ptr(h->data),
                        h->fd, h->path, METADATA_CLOSE_FD_ON_EXIT);
        h->fd = -1; /* with above, behavior same as close_fd */
        h->widx = ringbuf_add(h->data, h->filesize);
        h->end = h->filesize;
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
        return true;
    }

    /* Read the file in chunks until done, out of space, or (for
       to_buffer == 0) the buffering queue has pending messages */
    bool stop = false;
    while (h->end < h->filesize && !stop)
    {
        /* max amount to copy */
        size_t widx = h->widx;
        ssize_t copy_n = h->filesize - h->end;
        copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK);
        /* also clamp to the contiguous space before the buffer end */
        copy_n = MIN(copy_n, (off_t)(buffer_len - widx));

        mutex_lock(&llist_mutex);

        /* read only up to available space and stop if it would overwrite
           the next handle; stop one byte early to avoid empty/full alias
           (or else do more complicated arithmetic to differentiate) */
        size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST);
        ssize_t overlap = ringbuf_add_cross_full(widx, copy_n, next);

        mutex_unlock(&llist_mutex);

        if (overlap > 0) {
            stop = true;
            copy_n -= overlap;
        }

        if (copy_n <= 0)
            return false; /* no space for read */

        /* rc is the actual amount read */
        ssize_t rc = read(h->fd, ringbuf_ptr(widx), copy_n);

        if (rc <= 0) {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            logf("File ended %lu bytes early\n",
                 (unsigned long)(h->filesize - h->end));
            h->filesize = h->end;
            break;
        }

        /* Advance buffer and make data available to users */
        h->widx = ringbuf_add(widx, rc);
        h->end += rc;

        yield();

        if (to_buffer == 0) {
            /* Normal buffering - check queue */
            if (!queue_empty(&buffering_queue))
                break;
        } else {
            if (to_buffer <= (size_t)rc)
                break; /* Done */
            to_buffer -= rc;
        }
    }

    if (h->end >= h->filesize) {
        /* finished buffering the file */
        close_fd(&h->fd);
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
    }

    /* false (i.e. "stop") means we ran into the next handle and the
       caller should not keep filling */
    return !stop;
}
732
733/* Close the specified handle id and free its allocation. */
734/* Q_CLOSE_HANDLE */
735static bool close_handle(int handle_id)
736{
737 mutex_lock(&llist_mutex);
738 struct memory_handle *h = find_handle(handle_id);
739
740 /* If the handle is not found, it is closed */
741 if (h) {
742 close_fd(&h->fd);
743 unlink_handle(h);
744 }
745
746 mutex_unlock(&llist_mutex);
747 return true;
748}
749
750/* Free buffer space by moving the handle struct right before the useful
751 part of its data buffer or by moving all the data. */
static struct memory_handle * shrink_handle(struct memory_handle *h)
{
    if (!h)
        return NULL;

    if (h->type == TYPE_PACKET_AUDIO) {
        /* only move the handle struct */
        /* data is pinned by default - if we start moving packet audio,
           the semantics will determine whether or not data is movable
           but the handle will remain movable in either case */
        /* delta = gap between the struct's data area and the first
           still-unread byte (already-consumed data can be reclaimed) */
        size_t delta = ringbuf_sub_empty(h->ridx, h->data);

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, 0))
            return h;

        h->data = ringbuf_add(h->data, delta);
        h->start += delta;
    } else {
        /* metadata handle: we can move all of it */
        if (h->pinned || !HLIST_NEXT(h))
            return h; /* Pinned, last handle */

        size_t data_size = h->filesize - h->start;
        /* Slide the handle forward so its data abuts the next handle */
        uintptr_t handle_distance =
            ringbuf_sub_empty(ringbuf_offset(HLIST_NEXT(h)), h->data);
        size_t delta = handle_distance - data_size;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, data_size))
            return h;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        /* Some payloads contain absolute pointers into their own data
           and must be patched after the move */
        switch (h->type)
        {
        case TYPE_ID3:
            if (h->filesize != sizeof(struct mp3entry))
                break;
            /* when moving an mp3entry we need to readjust its pointers */
            adjust_mp3entry(ringbuf_ptr(h->data), ringbuf_ptr(h->data),
                            ringbuf_ptr(olddata));
            break;

        case TYPE_BITMAP:
            /* adjust the bitmap's pointer */
            ((struct bitmap *)ringbuf_ptr(h->data))->data =
                ringbuf_ptr(h->data + sizeof(struct bitmap));
            break;

        default:
            break;
        }
    }

    return h;
}
812
813/* Fill the buffer by buffering as much data as possible for handles that still
814 have data left to buffer
815 Return whether or not to continue filling after this */
816static bool fill_buffer(void)
817{
818 logf("fill_buffer()");
819 mutex_lock(&llist_mutex);
820
821 struct memory_handle *m = shrink_handle(HLIST_FIRST);
822
823 mutex_unlock(&llist_mutex);
824
825 while (queue_empty(&buffering_queue) && m) {
826 if (m->end < m->filesize && !buffer_handle(m->id, 0)) {
827 m = NULL;
828 break;
829 }
830 m = HLIST_NEXT(m);
831 }
832
833 if (m) {
834 return true;
835 } else {
836 /* only spin the disk down if the filling wasn't interrupted by an
837 event arriving in the queue. */
838 storage_sleep();
839 return false;
840 }
841}
842
843#ifdef HAVE_ALBUMART
844/* Given a file descriptor to a bitmap file, write the bitmap data to the
845 buffer, with a struct bitmap and the actual data immediately following.
846 Return value is the total size (struct + data). */
847static int load_image(int fd, const char *path,
848 struct bufopen_bitmap_data *data,
849 size_t bufidx, size_t max_size)
850{
851 (void)path;
852 int rc;
853 struct bitmap *bmp = ringbuf_ptr(bufidx);
854 struct dim *dim = data->dim;
855 struct mp3_albumart *aa = data->embedded_albumart;
856
857 /* get the desired image size */
858 bmp->width = dim->width, bmp->height = dim->height;
859 /* FIXME: alignment may be needed for the data buffer. */
860 bmp->data = ringbuf_ptr(bufidx + sizeof(struct bitmap));
861
862#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
863 bmp->maskdata = NULL;
864#endif
865 const int format = FORMAT_NATIVE | FORMAT_DITHER |
866 FORMAT_RESIZE | FORMAT_KEEP_ASPECT;
867#ifdef HAVE_JPEG
868 if (aa != NULL) {
869 lseek(fd, aa->pos, SEEK_SET);
870 rc = clip_jpeg_fd(fd, aa->type, aa->size, bmp, (int)max_size, format, NULL);
871 }
872 else if (strcmp(path + strlen(path) - 4, ".bmp"))
873 rc = read_jpeg_fd(fd, bmp, (int)max_size, format, NULL);
874 else
875#endif
876 rc = read_bmp_fd(fd, bmp, (int)max_size, format, NULL);
877
878 return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
879}
880#endif /* HAVE_ALBUMART */
881
882
883/*
884MAIN BUFFERING API CALLS
885========================
886
887bufopen : Request the opening of a new handle for a file
888bufalloc : Open a new handle for data other than a file.
889bufclose : Close an open handle
890bufseek : Set the read pointer in a handle
891bufadvance : Move the read pointer in a handle
892bufread : Copy data from a handle into a given buffer
893bufgetdata : Give a pointer to the handle's data
894
895These functions are exported, to allow interaction with the buffer.
896They take care of the content of the structs, and rely on the linked list
897management functions for all the actual handle management work.
898*/
899
900
901/* Reserve space in the buffer for a file.
902 filename: name of the file to open
903 offset: offset at which to start buffering the file, useful when the first
904 offset bytes of the file aren't needed.
905 type: one of the data types supported (audio, image, cuesheet, others
906 user_data: user data passed possibly passed in subcalls specific to a
907 data_type (only used for image (albumart) buffering so far )
908 return value: <0 if the file cannot be opened, or one file already
909 queued to be opened, otherwise the handle for the file in the buffer
910*/
int bufopen(const char *file, off_t offset, enum data_type type,
            void *user_data)
{
    int handle_id = ERR_BUFFER_FULL;
    size_t data;
    struct memory_handle *h;

    /* No buffer refs until after the mutex_lock call! */

    if (type == TYPE_ID3) {
        /* ID3 case: allocate space, init the handle and return. */
        /* The metadata itself is parsed later by the buffering thread
           (see the TYPE_ID3 branch of buffer_handle) */
        mutex_lock(&llist_mutex);

        h = add_handle(H_ALLOCALL, sizeof(struct mp3entry), file, &data);

        if (h) {
            handle_id = h->id;

            h->type = type;
            h->fd = -1;
            h->data = data;
            h->ridx = data;
            h->widx = data;
            h->filesize = sizeof(struct mp3entry);
            h->start = 0;
            h->pos = 0;
            h->end = 0;

            link_handle(h);

            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }

        mutex_unlock(&llist_mutex);
        return handle_id;
    }
    else if (type == TYPE_UNKNOWN)
        return ERR_UNSUPPORTED_TYPE;
#ifdef APPLICATION
    /* Loading code from memory is not supported in application builds */
    else if (type == TYPE_CODEC)
        return ERR_UNSUPPORTED_TYPE;
#endif
    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = 0;
#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* Bitmaps are resized to the requested dimensions when loaded,
         * so the file size should not be used as it may be too large
         * or too small */
        struct bufopen_bitmap_data *aa = user_data;
        size = BM_SIZE(aa->dim->width, aa->dim->height, FORMAT_NATIVE, false);
        size += sizeof(struct bitmap);

#ifdef HAVE_JPEG
        /* JPEG loading requires extra memory
         * TODO: don't add unncessary overhead for .bmp images! */
        size += JPEG_DECODE_OVERHEAD;
#endif
        /* resize_on_load requires space for 1 line + 2 spare lines */
#ifdef HAVE_LCD_COLOR
        size += sizeof(struct uint32_argb) * 3 * aa->dim->width;
#else
        size += sizeof(uint32_t) * 3 * aa->dim->width;
#endif
    }
#endif /* HAVE_ALBUMART */

    if (size == 0)
        size = filesize(fd);

    unsigned int hflags = 0;
    if (type == TYPE_PACKET_AUDIO || type == TYPE_CODEC)
        hflags |= H_CANWRAP;
    /* Bitmaps need their space allocated up front */
    if (type == TYPE_BITMAP)
        hflags |= H_ALLOCALL;

    /* An offset beyond the file is silently treated as "from the
       start" rather than an error */
    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size - adjusted_offset);

    mutex_lock(&llist_mutex);

    h = add_handle(hflags, padded_size, file, &data);
    if (!h) {
        DEBUGF("%s(): failed to add handle\n", __func__);
        mutex_unlock(&llist_mutex);
        close(fd);

        /*warn playback.c if it is trying to buffer too large of an image*/
        if(type == TYPE_BITMAP && padded_size >= buffer_len - 64*1024)
        {
            return ERR_BITMAP_TOO_LARGE;
        }
        return ERR_BUFFER_FULL;

    }

    handle_id = h->id;

    h->type = type;
    h->fd = -1;

#ifdef STORAGE_WANTS_ALIGN
    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP) {
        /* Align to desired storage alignment */
        size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)adjusted_offset -
                                               (uintptr_t)ringbuf_ptr(data));
        data = ringbuf_add(data, alignment_pad);
    }
#endif /* STORAGE_WANTS_ALIGN */

    h->data = data;
    h->ridx = data;
    h->start = adjusted_offset;
    h->pos = adjusted_offset;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* Bitmap file: we load the data instead of the file */
        int rc = load_image(fd, file, user_data, data, padded_size);
        if (rc <= 0) {
            /* Load failed: report the error; the unlinked handle is
               simply abandoned and its space reused by later adds */
            handle_id = ERR_FILE_ERROR;
        } else {
            data = ringbuf_add(data, rc);
            size = rc;
            adjusted_offset = rc;
        }
    }
    else
#endif
    if (type == TYPE_CUESHEET) {
        /* Keep the fd open: cuesheets are buffered immediately below */
        h->fd = fd;
    }

    if (handle_id >= 0) {
        h->widx = data;
        h->filesize = size;
        h->end = adjusted_offset;
        link_handle(h);
    }

    mutex_unlock(&llist_mutex);

    if (type == TYPE_CUESHEET) {
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        close(fd);

        if (handle_id >= 0) {
            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }
    }

    logf("bufopen: new hdl %d", handle_id);
    return handle_id;

    /* Currently only used for aa loading */
    (void)user_data;
}
1089
1090/* Open a new handle from data that needs to be copied from memory.
1091 src is the source buffer from which to copy data. It can be NULL to simply
1092 reserve buffer space.
1093 size is the requested size. The call will only be successful if the
1094 requested amount of data can entirely fit in the buffer without wrapping.
1095 Return value is the handle id for success or <0 for failure.
1096*/
1097int bufalloc(const void *src, size_t size, enum data_type type)
1098{
1099 if (type == TYPE_UNKNOWN)
1100 return ERR_UNSUPPORTED_TYPE;
1101
1102 int handle_id = ERR_BUFFER_FULL;
1103
1104 mutex_lock(&llist_mutex);
1105
1106 size_t data;
1107 struct memory_handle *h = add_handle(H_ALLOCALL, size, NULL, &data);
1108
1109 if (h) {
1110 handle_id = h->id;
1111
1112 if (src) {
1113 if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
1114 /* specially take care of struct mp3entry */
1115 copy_mp3entry(ringbuf_ptr(data), src);
1116 } else {
1117 memcpy(ringbuf_ptr(data), src, size);
1118 }
1119 }
1120
1121 h->type = type;
1122 h->fd = -1;
1123 h->data = data;
1124 h->ridx = data;
1125 h->widx = ringbuf_add(data, size);
1126 h->filesize = size;
1127 h->start = 0;
1128 h->pos = 0;
1129 h->end = size;
1130
1131 link_handle(h);
1132 }
1133
1134 mutex_unlock(&llist_mutex);
1135
1136 logf("bufalloc: new hdl %d", handle_id);
1137 return handle_id;
1138}
1139
1140/* Close the handle. Return true for success and false for failure */
1141bool bufclose(int handle_id)
1142{
1143 logf("bufclose(%d)", handle_id);
1144
1145 if (handle_id <= 0) {
1146 return true;
1147 }
1148
1149 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
1150 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
1151}
1152
1153/* Backend to bufseek and bufadvance. Call only in response to
1154 Q_REBUFFER_HANDLE! */
static void rebuffer_handle(int handle_id, off_t newpos)
{
    /* Reposition a handle to 'newpos' and refill its buffered data.
       Runs on the buffering thread; every early exit must queue_reply()
       because the requester is blocked in queue_send(). */
    struct memory_handle *h = find_handle(handle_id);
    if (!h) {
        queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
        return;
    }

    /* Check that we still need to do this since the request could have
       possibly been met by this time */
    if (newpos >= h->start && newpos <= h->end) {
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;
        queue_reply(&buffering_queue, 0);
        return;
    }

    /* When seeking forward off of the buffer, if it is a short seek attempt to
       avoid rebuffering the whole track, just read enough to satisfy */
    off_t amount = newpos - h->pos;

    if (amount > 0 && amount <= BUFFERING_DEFAULT_FILECHUNK) {
        /* Optimistically move the indices, then try to read just past the
           target; success means the data up to newpos is now present */
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;

        if (buffer_handle(handle_id, amount + 1) && h->end >= h->pos) {
            /* It really did succeed */
            queue_reply(&buffering_queue, 0);
            buffer_handle(handle_id, 0); /* Ok, try the rest */
            return;
        }
        /* Data collision or other file error - must reset */

        if (newpos > h->filesize)
            newpos = h->filesize; /* file truncation happened above */
    }

    /* Full reset path: discard buffered data and restart at newpos */
    mutex_lock(&llist_mutex);

    /* Offset of the following handle (or the first, when h is last) -
       used to check how much room the refill has */
    size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST);

#ifdef STORAGE_WANTS_ALIGN
    /* Strip alignment padding then redo */
    size_t new_index = ringbuf_add(ringbuf_offset(h), h->size);

    /* Align to desired storage alignment if space permits - handle could
       have been shrunken too close to the following one after a previous
       rebuffer. */
    size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos -
                                           (uintptr_t)ringbuf_ptr(new_index));

    if (ringbuf_add_cross_full(new_index, alignment_pad, next) > 0)
        alignment_pad = 0; /* Forego storage alignment this time */

    new_index = ringbuf_add(new_index, alignment_pad);
#else
    /* Just clear the data buffer */
    size_t new_index = h->data;
#endif /* STORAGE_WANTS_ALIGN */

    /* Reset the handle to its new position */
    h->ridx = h->widx = h->data = new_index;
    h->start = h->pos = h->end = newpos;

    if (h->fd >= 0)
        lseek(h->fd, newpos, SEEK_SET);

    /* If the remainder of the file cannot fit before the next handle,
       ask listeners to free some space (outside the lock) */
    off_t filerem = h->filesize - newpos;
    bool send = HLIST_NEXT(h) &&
                ringbuf_add_cross_full(new_index, filerem, next) > 0;

    mutex_unlock(&llist_mutex);

    if (send) {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("%s(): space is needed\n", __func__);
        send_event(BUFFER_EVENT_REBUFFER, &(int){ handle_id });
    }

    /* Now we do the rebuffer */
    queue_reply(&buffering_queue, 0);
    buffer_handle(handle_id, 0);
}
1239
1240/* Backend to bufseek and bufadvance */
1241static int seek_handle(struct memory_handle *h, off_t newpos)
1242{
1243 if ((newpos < h->start || newpos >= h->end) &&
1244 (newpos < h->filesize || h->end < h->filesize)) {
1245 /* access before or after buffered data and not to end of file or file
1246 is not buffered to the end-- a rebuffer is needed. */
1247 return queue_send(&buffering_queue, Q_REBUFFER_HANDLE,
1248 (intptr_t)&(struct buf_message_data){ h->id, newpos });
1249 }
1250 else {
1251 h->ridx = ringbuf_add(h->data, newpos - h->start);
1252 h->pos = newpos;
1253 return 0;
1254 }
1255}
1256
1257/* Set reading index in handle (relatively to the start of the file).
1258 Access before the available data will trigger a rebuffer.
1259 Return 0 for success and for failure:
1260 ERR_HANDLE_NOT_FOUND if the handle wasn't found
1261 ERR_INVALID_VALUE if the new requested position was beyond the end of
1262 the file
1263*/
1264int bufseek(int handle_id, size_t newpos)
1265{
1266 struct memory_handle *h = find_handle(handle_id);
1267 if (!h)
1268 return ERR_HANDLE_NOT_FOUND;
1269
1270 if (newpos > (size_t)h->filesize)
1271 return ERR_INVALID_VALUE;
1272
1273 return seek_handle(h, newpos);
1274}
1275
1276/* Advance the reading index in a handle (relatively to its current position).
1277 Return 0 for success and for failure:
1278 ERR_HANDLE_NOT_FOUND if the handle wasn't found
1279 ERR_INVALID_VALUE if the new requested position was before the beginning
1280 or beyond the end of the file
1281 */
1282int bufadvance(int handle_id, off_t offset)
1283{
1284 struct memory_handle *h = find_handle(handle_id);
1285 if (!h)
1286 return ERR_HANDLE_NOT_FOUND;
1287
1288 off_t pos = h->pos;
1289
1290 if ((offset < 0 && offset < -pos) ||
1291 (offset >= 0 && offset > h->filesize - pos))
1292 return ERR_INVALID_VALUE;
1293
1294 return seek_handle(h, pos + offset);
1295}
1296
1297/* Get the read position from the start of the file
1298 Returns the offset from byte 0 of the file and for failure:
1299 ERR_HANDLE_NOT_FOUND if the handle wasn't found
1300 */
1301off_t bufftell(int handle_id)
1302{
1303 const struct memory_handle *h = find_handle(handle_id);
1304 if (!h)
1305 return ERR_HANDLE_NOT_FOUND;
1306
1307 return h->pos;
1308}
1309
1310/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
1311 * actual amount of data available for reading. It does range checks on
1312 * size and returns a valid (and explicit) amount of data for reading */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    /* Clamp *size to what the file can still provide, optionally limit it
       to what the guard buffer can linearize (guardbuf_limit), and block
       until that much data is buffered. Returns the handle, or NULL if it
       vanished or its wait was signaled for abandonment. */
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return NULL;

    if (h->pos >= h->filesize) {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    off_t realsize = *size;
    off_t filerem = h->filesize - h->pos;

    /* A request of 0 means "as much as possible" */
    if (realsize <= 0 || realsize > filerem)
        realsize = filerem; /* clip to eof */

    if (guardbuf_limit && realsize > GUARD_BUFSIZE) {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to guard_bufsize over the end of the buffer */
        realsize = MIN((size_t)realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    off_t end = h->end;
    off_t wait_end = h->pos + realsize;

    if (end < wait_end && end < h->filesize) {
        /* Wait for the data to be ready */
        unsigned int request = 1;

        do
        {
            /* Poke the buffering thread on the first pass and then every
               100 iterations while the data remains unavailable */
            if (--request == 0) {
                request = 100;
                /* Data (still) isn't ready; ping buffering thread */
                LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
                queue_send(&buffering_queue, Q_START_FILL, handle_id);
            }

            sleep(0);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h)
                return NULL;

            if (h->signaled != 0)
                return NULL; /* Wait must be abandoned */

            end = h->end;
        }
        while (end < wait_end && end < h->filesize);

        /* The file may have been truncated while we waited; re-clip */
        filerem = h->filesize - h->pos;
        if (realsize > filerem)
            realsize = filerem;
    }

    *size = realsize;
    return h;
}
1378
1379
1380/* Note: It is safe for the thread responsible for handling the rebuffer
1381 * cleanup request to call bufread or bufgetdata only when the data will
1382 * be available-- not if it could be blocked waiting for it in prep_bufdata.
1383 * It should be apparent that if said thread is being forced to wait for
1384 * buffering but has not yet responded to the cleanup request, the space
1385 * can never be cleared to allow further reading of the file because it is
1386 * not listening to callbacks any longer. */
1387
1388/* Copy data from the given handle to the dest buffer.
1389 Return the number of bytes copied or < 0 for failure (handle not found).
1390 The caller is blocked until the requested amount of data is available.
1391*/
1392ssize_t bufread(int handle_id, size_t size, void *dest)
1393{
1394 const struct memory_handle *h =
1395 prep_bufdata(handle_id, &size, false);
1396 if (!h)
1397 return ERR_HANDLE_NOT_FOUND;
1398
1399 if (h->ridx + size > buffer_len) {
1400 /* the data wraps around the end of the buffer */
1401 size_t read = buffer_len - h->ridx;
1402 memcpy(dest, ringbuf_ptr(h->ridx), read);
1403 memcpy(dest + read, ringbuf_ptr(0), size - read);
1404 } else {
1405 memcpy(dest, ringbuf_ptr(h->ridx), size);
1406 }
1407
1408 return size;
1409}
1410
1411off_t bufstripsize(int handle_id, off_t size)
1412{
1413 struct memory_handle *h = find_handle(handle_id);
1414 if (!h || h->filesize < size)
1415 return ERR_INVALID_VALUE;
1416
1417 h->filesize = size;
1418 return size;
1419}
1420
1421/* Update the "data" pointer to make the handle's data available to the caller.
1422 Return the length of the available linear data or < 0 for failure (handle
1423 not found).
1424 The caller is blocked until the requested amount of data is available.
1425 size is the amount of linear data requested. it can be 0 to get as
1426 much as possible.
1427 The guard buffer may be used to provide the requested size. This means it's
1428 unsafe to request more than the size of the guard buffer.
1429*/
1430ssize_t bufgetdata(int handle_id, size_t size, void **data)
1431{
1432 struct memory_handle *h =
1433 prep_bufdata(handle_id, &size, true);
1434 if (!h)
1435 return ERR_HANDLE_NOT_FOUND;
1436
1437 if (h->ridx + size > buffer_len) {
1438 /* the data wraps around the end of the buffer :
1439 use the guard buffer to provide the requested amount of data. */
1440 size_t copy_n = h->ridx + size - buffer_len;
1441 /* prep_bufdata ensures
1442 adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
1443 so copy_n <= GUARD_BUFSIZE */
1444 memcpy(guard_buffer, ringbuf_ptr(0), copy_n);
1445 }
1446
1447 if (data)
1448 *data = ringbuf_ptr(h->ridx);
1449
1450 return size;
1451}
1452
1453/*
1454SECONDARY EXPORTED FUNCTIONS
1455============================
1456
buf_handle_offset
buf_set_base_handle
buf_handle_data_type
buf_is_handle
buf_pin_handle
buf_signal_handle
buf_filesize
buf_handle_remaining
buf_length
buf_used
buf_set_watermark
buf_get_watermark
1467
1468These functions are exported, to allow interaction with the buffer.
1469They take care of the content of the structs, and rely on the linked list
1470management functions for all the actual handle management work.
1471*/
1472bool buf_is_handle(int handle_id)
1473{
1474 return find_handle(handle_id) != NULL;
1475}
1476
1477int buf_handle_data_type(int handle_id)
1478{
1479 const struct memory_handle *h = find_handle(handle_id);
1480 if (!h)
1481 return ERR_HANDLE_NOT_FOUND;
1482 return h->type;
1483}
1484
1485off_t buf_filesize(int handle_id)
1486{
1487 const struct memory_handle *h = find_handle(handle_id);
1488 if (!h)
1489 return ERR_HANDLE_NOT_FOUND;
1490 return h->filesize;
1491}
1492
1493off_t buf_handle_offset(int handle_id)
1494{
1495 const struct memory_handle *h = find_handle(handle_id);
1496 if (!h)
1497 return ERR_HANDLE_NOT_FOUND;
1498 return h->start;
1499}
1500
1501off_t buf_handle_remaining(int handle_id)
1502{
1503 const struct memory_handle *h = find_handle(handle_id);
1504 if (!h)
1505 return ERR_HANDLE_NOT_FOUND;
1506 return h->filesize - h->end;
1507}
1508
1509bool buf_pin_handle(int handle_id, bool pin)
1510{
1511 struct memory_handle *h = find_handle(handle_id);
1512 if (!h)
1513 return false;
1514
1515 if (pin) {
1516 h->pinned++;
1517 } else if (h->pinned > 0) {
1518 h->pinned--;
1519 }
1520
1521 return true;
1522}
1523
1524bool buf_signal_handle(int handle_id, bool signal)
1525{
1526 struct memory_handle *h = find_handle(handle_id);
1527 if (!h)
1528 return false;
1529
1530 h->signaled = signal ? 1 : 0;
1531 return true;
1532}
1533
1534/* Return the size of the ringbuffer */
1535size_t buf_length(void)
1536{
1537 return buffer_len;
1538}
1539
1540/* Set the handle from which useful data is counted */
1541void buf_set_base_handle(int handle_id)
1542{
1543 mutex_lock(&llist_mutex);
1544 base_handle_id = handle_id;
1545 mutex_unlock(&llist_mutex);
1546}
1547
1548/* Return the amount of buffer space used */
1549size_t buf_used(void)
1550{
1551 mutex_lock(&llist_mutex);
1552 size_t used = bytes_used();
1553 mutex_unlock(&llist_mutex);
1554 return used;
1555}
1556
1557void buf_set_watermark(size_t bytes)
1558{
1559 conf_watermark = bytes;
1560}
1561
1562size_t buf_get_watermark(void)
1563{
1564 return BUF_WATERMARK;
1565}
1566
1567/** -- buffer thread helpers -- **/
static void shrink_buffer(void)
{
    /* Reclaim space by shrinking every handle, walking from the newest
       handle back to the oldest under the list lock. */
    logf("shrink_buffer()");

    mutex_lock(&llist_mutex);

    /* NOTE(review): the loop variable is deliberately reassigned from
       shrink_handle()'s return value, which appears to be the (possibly
       relocated) handle - confirm against shrink_handle's contract before
       restructuring this loop. */
    for (struct memory_handle *h = HLIST_LAST; h; h = HLIST_PREV(h)) {
        h = shrink_handle(h);
    }

    mutex_unlock(&llist_mutex);
}
1580
static void NORETURN_ATTR buffering_thread(void)
{
    /* Worker thread: services fill/close/rebuffer requests from the
       buffering_queue and opportunistically keeps the buffer topped up
       while 'filling' is set. Never returns. */
    bool filling = false;
    struct queue_event ev;

    while (true)
    {
        if (num_handles > 0) {
            if (!filling) {
                cancel_cpu_boost();
            }
            /* Short timeout while filling so refills resume quickly;
               longer (HZ/2) when idle to just watch the levels */
            queue_wait_w_tmo(&buffering_queue, &ev, filling ? 1 : HZ/2);
        } else {
            /* Nothing to buffer: block until a message arrives */
            filling = false;
            cancel_cpu_boost();
            queue_wait(&buffering_queue, &ev);
        }

        switch (ev.id)
        {
            case Q_START_FILL:
                /* A reader is waiting on data for this handle: free what
                   we can, then start buffering it */
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                if (buffer_handle((int)ev.data, 0)) {
                    filling = true;
                }
                else if (num_handles > 0 && conf_watermark > 0) {
                    update_data_counters(NULL);
                    if (data_counters.useful >= BUF_WATERMARK) {
                        send_event(BUFFER_EVENT_BUFFER_LOW, NULL);
                    }
                }
                break;

            case Q_BUFFER_HANDLE:
                /* Synchronous request to buffer one handle immediately */
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data, 0);
                break;

            case Q_REBUFFER_HANDLE:
            {
                /* Seek outside the buffered window: reposition + refill.
                   rebuffer_handle() sends the queue_reply itself. */
                struct buf_message_data *parm =
                    (struct buf_message_data *)ev.data;
                LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
                          parm->handle_id, parm->data);
                rebuffer_handle(parm->handle_id, parm->data);
                break;
            }

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        /* Only do background work when the queue is drained and there is
           something to buffer */
        if (num_handles == 0 || !queue_empty(&buffering_queue))
            continue;

        update_data_counters(NULL);

        if (filling) {
            filling = data_counters.remaining > 0 ? fill_buffer() : false;
        } else if (ev.id == SYS_TIMEOUT) {
            if (data_counters.useful < BUF_WATERMARK) {
                /* The buffer is low and we're idle, just watching the levels
                   - call the callbacks to get new data */
                send_event(BUFFER_EVENT_BUFFER_LOW, NULL);

                /* Continue anything else we haven't finished - it might
                   get booted off or stop early because the receiver hasn't
                   had a chance to clear anything yet */
                if (data_counters.remaining > 0) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}
1672
void INIT_ATTR buffering_init(void)
{
    /* One-time setup: create the list mutex, the message queue and the
       buffering thread. The thread is created frozen; buffering_reset()
       thaws it once a buffer has been assigned. */
    mutex_init(&llist_mutex);

    /* Thread should absolutely not respond to USB because if it waits first,
       then it cannot properly service the handles and leaks will happen -
       this is a worker thread and shouldn't need to care about any system
       notifications.
       ***
       Whoever is using buffering should be responsible enough to clear all
       the handles at the right time. */
    queue_init(&buffering_queue, false);
    buffering_thread_id = create_thread( buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    /* Enable synchronous (queue_send) messaging to the buffering thread */
    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}
1693
1694/* Initialise the buffering subsystem */
bool buffering_reset(char *buf, size_t buflen)
{
    /* (Re)assign the memory the buffering engine works from, closing all
       existing handles. buf may be NULL to leave buffering without a
       buffer. Returns false only if alignment eats the whole buffer. */

    /* Wraps of storage-aligned data must also be storage aligned,
       thus buf and buflen must be a aligned to an integer multiple of
       the storage alignment */

    if (buf) {
        /* Carve the guard area off the end before aligning */
        buflen -= MIN(buflen, GUARD_BUFSIZE);

        STORAGE_ALIGN_BUFFER(buf, buflen);

        if (!buf || !buflen)
            return false;
    } else {
        buflen = 0;
    }

    /* Give users a chance to close their handles first */
    send_event(BUFFER_EVENT_BUFFER_RESET, NULL);

    /* If handles weren't closed above, just do it */
    struct memory_handle *h;
    while ((h = HLIST_FIRST)) {
        bufclose(h->id);
    }

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    lld_init(&handle_list);
    lld_init(&mru_cache);

    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full...or 25% empty :)
       This is the greatest fullness that will trigger low-buffer events
       no matter what the setting because high-bitrate files can have
       ludicrous margins that even exceed the buffer size - most common
       with a huge anti-skip buffer but even without that setting,
       staying constantly active in buffering is pointless */
    high_watermark = 3*buflen / 4;

    /* The thread was created frozen in buffering_init(); start it now */
    thread_thaw(buffering_thread_id);

    return true;
}
1742
1743void buffering_get_debugdata(struct buffering_debug *dbgdata)
1744{
1745 struct data_counters dc;
1746 dbgdata->num_handles = update_data_counters(&dc);
1747 dbgdata->data_rem = dc.remaining;
1748 dbgdata->buffered_data = dc.buffered;
1749 dbgdata->useful_data = dc.useful;
1750 dbgdata->watermark = BUF_WATERMARK;
1751}