/* A modern Music Player Daemon based on the Rockbox open-source, high-quality
 * audio player.
 * Keywords: libadwaita, audio, rust, zig, deno, mpris, rockbox, mpd
 * (repository view: at master, 445 lines, 14 kB)
 */
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 by Michael Sevakis
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/* ARM assembly 8x8 inverse DCT for MPEG-2 decoding (libmpeg2-style).
 * Every multiply by a fixed IDCT constant is strength-reduced to a short
 * shift/add sequence, so no multiply instruction is used anywhere.
 */

#include "config.h"

        .syntax unified

        .global mpeg2_idct_copy
        .type   mpeg2_idct_copy, %function
        .global mpeg2_idct_add
        .type   mpeg2_idct_add, %function


/* Custom calling convention:
 * r0 contains block pointer and is non-volatile
 * all non-volatile c context saved and restored on its behalf
 *
 * .idct: in-place 2-D inverse DCT of the 8x8 block of 16-bit coefficients
 * at r0.  Loop 1: is the row pass (row results keep 4 extra fraction bits:
 * stored as value >> 12); loop 4: is the column pass, which stores the
 * final value >> 17.  Clobbers r1-r12 and flags; r0 is restored on exit.
 */
.idct:
        add     r12, r0, #128           /* r12 = block end (64 coeffs * 2) */
1:      /* ------------- row pass: one 8-coefficient row per pass -------- */
        ldrsh   r1, [r0, #0]            /* d0 */
        ldrsh   r2, [r0, #2]            /* d1 */
        ldrsh   r3, [r0, #4]            /* d2 */
        ldrsh   r4, [r0, #6]            /* d3 */
        ldrsh   r5, [r0, #8]            /* d0 */
        ldrsh   r6, [r0, #10]           /* d1 */
        ldrsh   r7, [r0, #12]           /* d2 */
        ldrsh   r8, [r0, #14]           /* d3 */
        orrs    r9, r2, r3              /* are all seven AC terms zero? */
        orrseq  r9, r4, r5
        orrseq  r9, r6, r7
        cmpeq   r8, #0
        bne     2f
        /* DC-only row: every output is (d0 << 11) >> 12 == (d0 << 15) >> 16.
         * Build that halfword, duplicate it, store it across the row. */
        mov     r1, r1, asl #15
        bic     r1, r1, #0x8000         /* clear bit 15 so the orr below
                                         * cannot disturb the low halfword */
        orr     r1, r1, r1, lsr #16     /* replicate into both halfwords */
        str     r1, [r0], #4
        str     r1, [r0], #4
        str     r1, [r0], #4
        str     r1, [r0], #4
        cmp     r0, r12
        blo     1b
        b       3f
2:      /* general row */
        mov     r1, r1, asl #11         /* r1 = d0 = (block[0] << 11) + 2048 */
        add     r1, r1, #2048           /* +2048 rounds the later >> 12 */
        add     r1, r1, r3, asl #11     /* r1 = t0 = d0 + (block[2] << 11) */
        sub     r3, r1, r3, asl #12     /* r3 = t1 = d0 - (block[2] << 11) */

        add     r9, r2, r4              /* r9 = tmp = (d1+d3)*(1108/4) */
        add     r10, r9, r9, asl #2
        add     r10, r10, r9, asl #4
        add     r9, r10, r9, asl #8

        add     r10, r2, r2, asl #4     /* r2 = t2 = tmp + (d1*(1568/32)*8) */
        add     r2, r10, r2, asl #5
        add     r2, r9, r2, asl #3

        add     r10, r4, r4, asl #2     /* r4 = t3 = tmp - (d3*(3784/8)*2) */
        rsb     r10, r10, r4, asl #6
        add     r4, r4, r10, asl #3
        sub     r4, r9, r4, asl #1
        /* t2 & t3 are 1/4 final value here */
        add     r1, r1, r2, asl #2      /* r1 = a0 = t0 + t2 */
        sub     r2, r1, r2, asl #3      /* r2 = a3 = t0 - t2 */
        add     r3, r3, r4, asl #2      /* r3 = a1 = t1 + t3 */
        sub     r4, r3, r4, asl #3      /* r4 = a2 = t1 - t3 */

        add     r9, r8, r5              /* r9 = tmp = 565*(d3 + d0) */
        add     r10, r9, r9, asl #4
        add     r10, r10, r10, asl #5
        add     r9, r10, r9, asl #2

        add     r10, r5, r5, asl #4     /* r5 = t0 = tmp + (((2276/4)*d0)*4) */
        add     r10, r10, r10, asl #5
        add     r5, r10, r5, asl #3
        add     r5, r9, r5, asl #2

        add     r10, r8, r8, asl #2     /* r8 = t1 = tmp - (((3406/2)*d3)*2) */
        add     r10, r10, r10, asl #4
        add     r10, r10, r8, asl #7
        rsb     r8, r8, r10, asl #3
        sub     r8, r9, r8, asl #1

        add     r9, r6, r7              /* r9 = tmp = (2408/8)*(d1 + d2) */
        add     r10, r9, r9, asl #3
        add     r10, r10, r10, asl #5
        add     r9, r10, r9, asl #2

        add     r10, r7, r7, asl #3     /* r7 = t2 = (tmp*8) - 799*d2 */
        add     r10, r10, r7, asl #4
        rsb     r7, r7, r10, asl #5
        rsb     r7, r7, r9, asl #3

        sub     r10, r6, r6, asl #4     /* r6 = t3 = (tmp*8) - 4017*d1 */
        sub     r10, r10, r6, asl #6
        add     r10, r10, r6, asl #12
        add     r6, r10, r6
        rsb     r6, r6, r9, asl #3
        /* t0 = r5, t1 = r8, t2 = r7, t3 = r6 */
        add     r9, r5, r7              /* r9 = b0 = t0 + t2 */
        add     r10, r8, r6             /* r10 = b3 = t1 + t3 */
        sub     r5, r5, r7              /* t0 -= t2 */
        sub     r8, r8, r6              /* t1 -= t3 */
        add     r6, r5, r8              /* r6 = t0 + t1 */
        sub     r7, r5, r8              /* r7 = t0 - t1 */

        add     r11, r6, r6, asr #2     /* r6 = b1 = r6*(181/128) */
        add     r11, r11, r11, asr #5
        add     r6, r11, r6, asr #3
        add     r11, r7, r7, asr #2     /* r7 = b2 = r7*(181/128) */
        add     r11, r11, r11, asr #5
        add     r7, r11, r7, asr #3
        /* r1 = a0, r3 = a1, r4 = a2, r2 = a3 */
        /* r9 = b0, r6 = b1*2, r7 = b2*2, r10 = b3 */
        add     r5, r1, r9              /* block[0] = (a0 + b0) >> 12 */
        mov     r5, r5, asr #12
        strh    r5, [r0], #2
        add     r8, r3, r6, asr #1      /* block[1] = (a1 + b1) >> 12 */
        mov     r8, r8, asr #12
        strh    r8, [r0], #2
        add     r5, r4, r7, asr #1      /* block[2] = (a2 + b2) >> 12 */
        mov     r5, r5, asr #12
        strh    r5, [r0], #2
        add     r8, r2, r10             /* block[3] = (a3 + b3) >> 12 */
        mov     r8, r8, asr #12
        strh    r8, [r0], #2
        sub     r5, r2, r10             /* block[4] = (a3 - b3) >> 12 */
        mov     r5, r5, asr #12
        strh    r5, [r0], #2
        sub     r8, r4, r7, asr #1      /* block[5] = (a2 - b2) >> 12 */
        mov     r8, r8, asr #12
        strh    r8, [r0], #2
        sub     r5, r3, r6, asr #1      /* block[6] = (a1 - b1) >> 12 */
        mov     r5, r5, asr #12
        strh    r5, [r0], #2
        sub     r8, r1, r9              /* block[7] = (a0 - b0) >> 12 */
        mov     r8, r8, asr #12
        strh    r8, [r0], #2
        cmp     r0, r12
        blo     1b
3:      /* --------------- column pass: one column per pass -------------- */
        sub     r0, r0, #128            /* rewind to block start */
        add     r12, r0, #16            /* 8 columns * 2 bytes */
4:      /* offsets are #n*8 bytes: one block row (8 halfwords) apart */
        ldrsh   r1, [r0, #0*8]          /* d0 */
        ldrsh   r2, [r0, #2*8]          /* d1 */
        ldrsh   r3, [r0, #4*8]          /* d2 */
        ldrsh   r4, [r0, #6*8]          /* d3 */
        ldrsh   r5, [r0, #8*8]          /* d0 */
        ldrsh   r6, [r0, #10*8]         /* d1 */
        ldrsh   r7, [r0, #12*8]         /* d2 */
        ldrsh   r8, [r0, #14*8]         /* d3 */

        mov     r1, r1, asl #11         /* r1 = d0 = (block[0] << 11) + 65536 */
        add     r1, r1, #65536          /* +(1 << 16) rounds the final >> 17 */
        add     r1, r1, r3, asl #11     /* r1 = t0 = d0 + d2:(block[2] << 11) */
        sub     r3, r1, r3, asl #12     /* r3 = t1 = d0 - d2:(block[2] << 11) */

        add     r9, r2, r4              /* r9 = tmp = (d1+d3)*(1108/4) */
        add     r10, r9, r9, asl #2
        add     r10, r10, r9, asl #4
        add     r9, r10, r9, asl #8

        add     r11, r2, r2, asl #4     /* r2 = t2 = tmp + (d1*(1568/32)*8) */
        add     r2, r11, r2, asl #5
        add     r2, r9, r2, asl #3

        add     r10, r4, r4, asl #2     /* r4 = t3 = tmp - (d3*(3784/8)*2) */
        rsb     r10, r10, r4, asl #6
        add     r4, r4, r10, asl #3
        sub     r4, r9, r4, asl #1
        /* t2 & t3 are 1/4 final value here */
        add     r1, r1, r2, asl #2      /* r1 = a0 = t0 + t2 */
        sub     r2, r1, r2, asl #3      /* r2 = a3 = t0 - t2 */
        add     r3, r3, r4, asl #2      /* r3 = a1 = t1 + t3 */
        sub     r4, r3, r4, asl #3      /* r4 = a2 = t1 - t3 */

        add     r9, r8, r5              /* r9 = tmp = 565*(d3 + d0) */
        add     r10, r9, r9, asl #4
        add     r10, r10, r10, asl #5
        add     r9, r10, r9, asl #2

        add     r10, r5, r5, asl #4     /* r5 = t0 = tmp + (((2276/4)*d0)*4) */
        add     r10, r10, r10, asl #5
        add     r5, r10, r5, asl #3
        add     r5, r9, r5, asl #2

        add     r10, r8, r8, asl #2     /* r8 = t1 = tmp - (((3406/2)*d3)*2) */
        add     r10, r10, r10, asl #4
        add     r10, r10, r8, asl #7
        rsb     r8, r8, r10, asl #3
        sub     r8, r9, r8, asl #1

        add     r9, r6, r7              /* r9 = tmp = (2408/8)*(d1 + d2) */
        add     r10, r9, r9, asl #3
        add     r10, r10, r10, asl #5
        add     r9, r10, r9, asl #2

        add     r10, r7, r7, asl #3     /* r7 = t2 = (tmp*8) - 799*d2 */
        add     r10, r10, r7, asl #4
        rsb     r7, r7, r10, asl #5
        rsb     r7, r7, r9, asl #3

        sub     r10, r6, r6, asl #4     /* r6 = t3 = (tmp*8) - 4017*d1 */
        sub     r10, r10, r6, asl #6
        add     r10, r10, r6, asl #12
        add     r6, r10, r6
        rsb     r6, r6, r9, asl #3
        /* t0 = r5, t1 = r8, t2 = r7, t3 = r6 */
        add     r9, r5, r7              /* r9 = b0 = t0 + t2 */
        add     r10, r8, r6             /* r10 = b3 = t1 + t3 */
        sub     r5, r5, r7              /* t0 -= t2 */
        sub     r8, r8, r6              /* t1 -= t3 */
        add     r6, r5, r8              /* r6 = t0 + t1 */
        sub     r7, r5, r8              /* r7 = t0 - t1 */

        add     r11, r6, r6, asr #2     /* r6 = b1 = r6*(181/128) */
        add     r11, r11, r11, asr #5
        add     r6, r11, r6, asr #3
        add     r11, r7, r7, asr #2     /* r7 = b2 = r7*(181/128) */
        add     r11, r11, r11, asr #5
        add     r7, r11, r7, asr #3
        /* r1 = a0, r3 = a1, r4 = a2, r2 = a3 */
        /* r9 = b0, r6 = b1*2, r7 = b2*2, r10 = b3 */
        add     r5, r1, r9              /* block[0] = (a0 + b0) >> 17 */
        mov     r5, r5, asr #17
        strh    r5, [r0, #0*8]
        add     r8, r3, r6, asr #1      /* block[1] = (a1 + b1) >> 17 */
        mov     r8, r8, asr #17
        strh    r8, [r0, #2*8]
        add     r5, r4, r7, asr #1      /* block[2] = (a2 + b2) >> 17 */
        mov     r5, r5, asr #17
        strh    r5, [r0, #4*8]
        add     r8, r2, r10             /* block[3] = (a3 + b3) >> 17 */
        mov     r8, r8, asr #17
        strh    r8, [r0, #6*8]
        sub     r5, r2, r10             /* block[4] = (a3 - b3) >> 17 */
        mov     r5, r5, asr #17
        strh    r5, [r0, #8*8]
        sub     r8, r4, r7, asr #1      /* block[5] = (a2 - b2) >> 17 */
        mov     r8, r8, asr #17
        strh    r8, [r0, #10*8]
        sub     r5, r3, r6, asr #1      /* block[6] = (a1 - b1) >> 17 */
        mov     r5, r5, asr #17
        strh    r5, [r0, #12*8]
        sub     r8, r1, r9              /* block[7] = (a0 - b0) >> 17 */
        mov     r8, r8, asr #17
        strh    r8, [r0, #14*8]
        add     r0, r0, #2              /* next column */
        cmp     r0, r12
        blo     4b
        sub     r0, r0, #16             /* restore block pointer for caller */
        bx      lr

/* mpeg2_idct_copy: full IDCT, then copy the result to a byte buffer,
 * clamping each value to 0..255 and zeroing the coefficient block.
 * Register usage shows: r0 = int16 block[64], r1 = byte dest, r2 = stride
 * NOTE(review): C prototype inferred from register usage -- confirm against
 * the mpeg2dec headers this is linked with.
 */
mpeg2_idct_copy:
        stmfd   sp!, { r1-r2, r4-r11, lr }
        bl      .idct                   /* r0 (block) preserved by .idct */
        ldmfd   sp!, { r1-r2 }          /* r1 = dest, r2 = stride */
        mov     r11, #0                 /* zero word for clearing the block */
        add     r12, r0, #128           /* r12 = block end */
1:      /* one 8-pixel row per iteration.
         * Clamp idiom: unsigned (x > 255) is also true for x < 0; then
         * mvn of (x asr 31) yields 0x00... for x < 0, 0xff... for x > 255,
         * and strb keeps just the low byte (0 or 255). */
        ldrsh   r3, [r0, #0]
        ldrsh   r4, [r0, #2]
        ldrsh   r5, [r0, #4]
        ldrsh   r6, [r0, #6]
        ldrsh   r7, [r0, #8]
        ldrsh   r8, [r0, #10]
        ldrsh   r9, [r0, #12]
        ldrsh   r10, [r0, #14]
        cmp     r3, #255
        mvnhi   r3, r3, asr #31
        strb    r3, [r1, #0]
        str     r11, [r0], #4           /* zero two coefficients behind us */
        cmp     r4, #255
        mvnhi   r4, r4, asr #31
        strb    r4, [r1, #1]
        cmp     r5, #255
        mvnhi   r5, r5, asr #31
        strb    r5, [r1, #2]
        str     r11, [r0], #4
        cmp     r6, #255
        mvnhi   r6, r6, asr #31
        strb    r6, [r1, #3]
        cmp     r7, #255
        mvnhi   r7, r7, asr #31
        strb    r7, [r1, #4]
        str     r11, [r0], #4
        cmp     r8, #255
        mvnhi   r8, r8, asr #31
        strb    r8, [r1, #5]
        cmp     r9, #255
        mvnhi   r9, r9, asr #31
        strb    r9, [r1, #6]
        str     r11, [r0], #4
        cmp     r10, #255
        mvnhi   r10, r10, asr #31
        strb    r10, [r1, #7]
        add     r1, r1, r2              /* dest += stride */
        cmp     r0, r12
        blo     1b
        ldmpc   regs=r4-r11             /* Rockbox macro: pop r4-r11 and
                                         * return (presumably via config.h) */

/* mpeg2_idct_add: IDCT the block and add it to a byte buffer with
 * saturation to 0..255, zeroing the block.  Register usage shows:
 * r0 = last, r1 = int16 block[64], r2 = byte dest, r3 = stride.
 * If last == 129 and (block[0] & 0x70) != 0x40, only the DC term is
 * applied via the shortcut at 3: below; otherwise the full IDCT runs.
 * NOTE(review): the meaning of the 129/0x70/0x40 sentinel values is
 * inherited from the C mpeg2dec implementation -- confirm with the caller.
 */
mpeg2_idct_add:
        cmp     r0, #129
        mov     r0, r1                  /* r0 = block (mov leaves flags) */
        ldrsheq r1, [r0, #0]            /* last == 129: peek at block[0] */
        bne     1f
        and     r1, r1, #0x70
        cmp     r1, #0x40
        bne     3f                      /* take the DC-only shortcut */
1:      /* -------- full IDCT + saturating add path -------- */
        stmfd   sp!, { r2-r11, lr }
        bl      .idct                   /* r0 (block) preserved by .idct */
        ldmfd   sp!, { r1-r2 }          /* r1 = dest, r2 = stride */
        mov     r11, #0                 /* zero word for clearing the block */
        add     r12, r0, #128           /* r12 = block end */
2:      /* one row: 8 dest bytes + 8 coefficients, loads interleaved with
         * the clamp/store of the previous value to hide load latency */
        ldrb    r3, [r1, #0]
        ldrb    r4, [r1, #1]
        ldrb    r5, [r1, #2]
        ldrb    r6, [r1, #3]
        ldrsh   r7, [r0, #0]
        ldrsh   r8, [r0, #2]
        ldrsh   r9, [r0, #4]
        ldrsh   r10, [r0, #6]
        add     r7, r7, r3              /* block[0] + dest[0] */
        ldrb    r3, [r1, #4]
        cmp     r7, #255
        mvnhi   r7, r7, asr #31         /* clamp to 0..255 (see copy above) */
        strb    r7, [r1, #0]
        ldrsh   r7, [r0, #8]
        add     r8, r8, r4
        ldrb    r4, [r1, #5]
        cmp     r8, #255
        mvnhi   r8, r8, asr #31
        strb    r8, [r1, #1]
        ldrsh   r8, [r0, #10]
        add     r9, r9, r5
        ldrb    r5, [r1, #6]
        cmp     r9, #255
        mvnhi   r9, r9, asr #31
        strb    r9, [r1, #2]
        ldrsh   r9, [r0, #12]
        add     r10, r10, r6
        ldrb    r6, [r1, #7]
        cmp     r10, #255
        mvnhi   r10, r10, asr #31
        strb    r10, [r1, #3]
        ldrsh   r10, [r0, #14]
        str     r11, [r0], #4           /* zero coefficients behind us */
        add     r7, r7, r3
        cmp     r7, #255
        mvnhi   r7, r7, asr #31
        strb    r7, [r1, #4]
        str     r11, [r0], #4
        add     r8, r8, r4
        cmp     r8, #255
        mvnhi   r8, r8, asr #31
        strb    r8, [r1, #5]
        str     r11, [r0], #4
        add     r9, r9, r5
        cmp     r9, #255
        mvnhi   r9, r9, asr #31
        strb    r9, [r1, #6]
        add     r10, r10, r6
        cmp     r10, #255
        mvnhi   r10, r10, asr #31
        strb    r10, [r1, #7]
        str     r11, [r0], #4
        add     r1, r1, r2              /* dest += stride */
        cmp     r0, r12
        blo     2b
        ldmpc   regs=r4-r11             /* pop r4-r11 and return */
3:      /* -------- DC-only shortcut: add rounded DC to every pixel ------ */
        stmfd   sp!, { r4-r5, lr }
        ldrsh   r1, [r0, #0]            /* r1 = block[0] */
        mov     r4, #0
        strh    r4, [r0, #0]            /* block[0] = 0 */
        strh    r4, [r0, #126]          /* block[63] = 0 */
        add     r1, r1, #64             /* r1 = DC << 7; +64 rounds >> 7 */
        add     r0, r2, r3, asl #3      /* r0 = dest + 8*stride = end */
4:      /* one row of 8 pixels, in two groups of 4 */
        ldrb    r4, [r2, #0]
        ldrb    r5, [r2, #1]
        ldrb    r12, [r2, #2]
        ldrb    lr, [r2, #3]
        add     r4, r4, r1, asr #7      /* pixel += (DC + rounding) >> 7 */
        cmp     r4, #255
        mvnhi   r4, r4, asr #31         /* clamp to 0..255 */
        strb    r4, [r2, #0]
        add     r5, r5, r1, asr #7
        cmp     r5, #255
        mvnhi   r5, r5, asr #31
        strb    r5, [r2, #1]
        add     r12, r12, r1, asr #7
        cmp     r12, #255
        mvnhi   r12, r12, asr #31
        strb    r12, [r2, #2]
        add     lr, lr, r1, asr #7
        cmp     lr, #255
        mvnhi   lr, lr, asr #31
        strb    lr, [r2, #3]
        ldrb    r4, [r2, #4]
        ldrb    r5, [r2, #5]
        ldrb    r12, [r2, #6]
        ldrb    lr, [r2, #7]
        add     r4, r4, r1, asr #7
        cmp     r4, #255
        mvnhi   r4, r4, asr #31
        strb    r4, [r2, #4]
        add     r5, r5, r1, asr #7
        cmp     r5, #255
        mvnhi   r5, r5, asr #31
        strb    r5, [r2, #5]
        add     r12, r12, r1, asr #7
        cmp     r12, #255
        mvnhi   r12, r12, asr #31
        strb    r12, [r2, #6]
        add     lr, lr, r1, asr #7
        cmp     lr, #255
        mvnhi   lr, lr, asr #31
        strb    lr, [r2, #7]
        add     r2, r2, r3              /* dest += stride */
        cmp     r2, r0
        blo     4b
        ldmpc   regs=r4-r5              /* pop r4-r5 and return */