A decentralized music tracking and discovery platform built on AT Protocol 🎵 rocksky.app
spotify atproto lastfm musicbrainz scrobbling listenbrainz

[tracklist] implement track queue service

+2841 -1045
+12 -1
.github/workflows/ci.yml
··· 7 7 branches: 8 8 - main 9 9 jobs: 10 - fmt: 10 + tests: 11 11 runs-on: ubuntu-latest 12 12 steps: 13 13 - uses: actions/checkout@v2 14 + - name: Install dependencies 15 + run: | 16 + apt-get update && apt-get install -y \ 17 + libreadline-dev \ 18 + pkg-config \ 19 + flex \ 20 + bison \ 21 + build-essential 14 22 - name: Setup Fluent CI 15 23 uses: fluentci-io/setup-fluentci@v5 16 24 with: ··· 23 31 run: | 24 32 type cargo 25 33 cargo fmt --all --check 34 + - name: Run tests 35 + run: | 36 + cargo test -p rocksky-tracklist
+299 -252
Cargo.lock
··· 49 49 "mime", 50 50 "percent-encoding", 51 51 "pin-project-lite", 52 - "rand 0.9.1", 52 + "rand 0.9.2", 53 53 "sha1", 54 54 "smallvec", 55 55 "tokio", ··· 339 339 version = "0.2.21" 340 340 source = "registry+https://github.com/rust-lang/crates.io-index" 341 341 checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" 342 - 343 - [[package]] 344 - name = "analytics" 345 - version = "0.1.0" 346 - dependencies = [ 347 - "actix-web", 348 - "anyhow", 349 - "async-nats", 350 - "chrono", 351 - "clap", 352 - "dotenv", 353 - "duckdb", 354 - "owo-colors", 355 - "polars", 356 - "serde", 357 - "serde_json", 358 - "sqlx", 359 - "tokio", 360 - "tokio-stream", 361 - ] 362 342 363 343 [[package]] 364 344 name = "android-tzdata" ··· 1276 1256 ] 1277 1257 1278 1258 [[package]] 1279 - name = "connect" 1280 - version = "0.1.0" 1281 - dependencies = [ 1282 - "anyhow", 1283 - "async-trait", 1284 - "base64 0.22.1", 1285 - "dirs", 1286 - "futures-util", 1287 - "http 1.3.1", 1288 - "jsonrpsee", 1289 - "owo-colors", 1290 - "reqwest", 1291 - "serde", 1292 - "serde_json", 1293 - "tokio", 1294 - "tokio-stream", 1295 - "tokio-tungstenite", 1296 - "tungstenite", 1297 - ] 1298 - 1299 - [[package]] 1300 1259 name = "const-oid" 1301 1260 version = "0.9.6" 1302 1261 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1679 1638 checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" 1680 1639 1681 1640 [[package]] 1682 - name = "dropbox" 1683 - version = "0.1.0" 1684 - dependencies = [ 1685 - "actix-web", 1686 - "aes", 1687 - "anyhow", 1688 - "async-nats", 1689 - "chrono", 1690 - "clap", 1691 - "ctr", 1692 - "dotenv", 1693 - "futures", 1694 - "hex", 1695 - "jsonwebtoken", 1696 - "lofty", 1697 - "md5", 1698 - "owo-colors", 1699 - "redis 0.29.5", 1700 - "reqwest", 1701 - "serde", 1702 - "serde_json", 1703 - "sha256", 1704 - "sqlx", 1705 - "symphonia", 1706 - "tempfile", 1707 - "tokio", 1708 - "tokio-stream", 1709 - ] 1710 - 1711 - 
[[package]] 1712 1641 name = "duckdb" 1713 1642 version = "1.2.2" 1714 1643 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 2181 2110 "serde_json", 2182 2111 "wasm-bindgen", 2183 2112 "web-sys", 2184 - ] 2185 - 2186 - [[package]] 2187 - name = "googledrive" 2188 - version = "0.1.0" 2189 - dependencies = [ 2190 - "actix-web", 2191 - "aes", 2192 - "anyhow", 2193 - "async-nats", 2194 - "chrono", 2195 - "clap", 2196 - "ctr", 2197 - "dotenv", 2198 - "futures", 2199 - "hex", 2200 - "jsonwebtoken", 2201 - "lofty", 2202 - "md5", 2203 - "owo-colors", 2204 - "redis 0.29.5", 2205 - "reqwest", 2206 - "serde", 2207 - "serde_json", 2208 - "serde_urlencoded", 2209 - "sha256", 2210 - "sqlx", 2211 - "symphonia", 2212 - "tempfile", 2213 - "tokio", 2214 - "tokio-stream", 2215 2113 ] 2216 2114 2217 2115 [[package]] ··· 2773 2671 checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" 2774 2672 2775 2673 [[package]] 2776 - name = "jetstream" 2777 - version = "0.1.0" 2778 - dependencies = [ 2779 - "anyhow", 2780 - "async-nats", 2781 - "chrono", 2782 - "dotenv", 2783 - "futures-util", 2784 - "owo-colors", 2785 - "reqwest", 2786 - "serde", 2787 - "serde_json", 2788 - "sha256", 2789 - "sqlx", 2790 - "tokio", 2791 - "tokio-stream", 2792 - "tokio-tungstenite", 2793 - "tungstenite", 2794 - "url", 2795 - ] 2796 - 2797 - [[package]] 2798 2674 name = "jni" 2799 2675 version = "0.21.1" 2800 2676 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 3748 3624 "pgrx-pg-config", 3749 3625 "postgres", 3750 3626 "proptest", 3751 - "rand 0.9.1", 3627 + "rand 0.9.2", 3752 3628 "regex", 3753 3629 "serde", 3754 3630 "serde_json", ··· 3866 3742 ] 3867 3743 3868 3744 [[package]] 3869 - name = "playlists" 3870 - version = "0.1.0" 3871 - dependencies = [ 3872 - "aes", 3873 - "anyhow", 3874 - "async-nats", 3875 - "chrono", 3876 - "clap", 3877 - "ctr", 3878 - "dotenv", 3879 - "duckdb", 3880 - "hex", 3881 - "jsonwebtoken", 3882 - "owo-colors", 3883 - 
"polars", 3884 - "reqwest", 3885 - "serde", 3886 - "serde_json", 3887 - "sha2", 3888 - "sqlx", 3889 - "tokio", 3890 - "tokio-stream", 3891 - ] 3892 - 3893 - [[package]] 3894 3745 name = "polars" 3895 3746 version = "0.46.0" 3896 3747 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 4421 4272 "hmac", 4422 4273 "md-5", 4423 4274 "memchr", 4424 - "rand 0.9.1", 4275 + "rand 0.9.2", 4425 4276 "sha2", 4426 4277 "stringprep", 4427 4278 ] ··· 4490 4341 "bitflags 2.9.1", 4491 4342 "lazy_static", 4492 4343 "num-traits", 4493 - "rand 0.9.1", 4344 + "rand 0.9.2", 4494 4345 "rand_chacha 0.9.0", 4495 4346 "rand_xorshift", 4496 4347 "regex-syntax", ··· 4583 4434 "bytes", 4584 4435 "getrandom 0.3.3", 4585 4436 "lru-slab", 4586 - "rand 0.9.1", 4437 + "rand 0.9.2", 4587 4438 "ring", 4588 4439 "rustc-hash", 4589 4440 "rustls 0.23.27", ··· 4657 4508 4658 4509 [[package]] 4659 4510 name = "rand" 4660 - version = "0.9.1" 4511 + version = "0.9.2" 4661 4512 source = "registry+https://github.com/rust-lang/crates.io-index" 4662 - checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" 4513 + checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" 4663 4514 dependencies = [ 4664 4515 "rand_chacha 0.9.0", 4665 4516 "rand_core 0.9.3", ··· 4797 4648 checksum = "1bc42f3a12fd4408ce64d8efef67048a924e543bd35c6591c0447fda9054695f" 4798 4649 dependencies = [ 4799 4650 "arc-swap", 4651 + "bytes", 4800 4652 "combine", 4653 + "futures-util", 4801 4654 "itoa", 4802 4655 "num-bigint", 4803 4656 "percent-encoding", 4657 + "pin-project-lite", 4658 + "rustls 0.23.27", 4659 + "rustls-native-certs 0.8.1", 4804 4660 "ryu", 4805 4661 "sha1_smol", 4806 4662 "socket2", 4663 + "tokio", 4664 + "tokio-rustls 0.26.2", 4665 + "tokio-util", 4807 4666 "url", 4808 4667 ] 4809 4668 ··· 4961 4820 ] 4962 4821 4963 4822 [[package]] 4823 + name = "rocksky-analytics" 4824 + version = "0.1.0" 4825 + dependencies = [ 4826 + "actix-web", 4827 + "anyhow", 4828 + 
"async-nats", 4829 + "chrono", 4830 + "clap", 4831 + "dotenv", 4832 + "duckdb", 4833 + "owo-colors", 4834 + "polars", 4835 + "serde", 4836 + "serde_json", 4837 + "sqlx", 4838 + "tokio", 4839 + "tokio-stream", 4840 + ] 4841 + 4842 + [[package]] 4843 + name = "rocksky-connect" 4844 + version = "0.1.0" 4845 + dependencies = [ 4846 + "anyhow", 4847 + "async-trait", 4848 + "base64 0.22.1", 4849 + "dirs", 4850 + "futures-util", 4851 + "http 1.3.1", 4852 + "jsonrpsee", 4853 + "owo-colors", 4854 + "reqwest", 4855 + "serde", 4856 + "serde_json", 4857 + "tokio", 4858 + "tokio-stream", 4859 + "tokio-tungstenite", 4860 + "tungstenite", 4861 + ] 4862 + 4863 + [[package]] 4864 + name = "rocksky-dropbox" 4865 + version = "0.1.0" 4866 + dependencies = [ 4867 + "actix-web", 4868 + "aes", 4869 + "anyhow", 4870 + "async-nats", 4871 + "chrono", 4872 + "clap", 4873 + "ctr", 4874 + "dotenv", 4875 + "futures", 4876 + "hex", 4877 + "jsonwebtoken", 4878 + "lofty", 4879 + "md5", 4880 + "owo-colors", 4881 + "redis 0.29.5", 4882 + "reqwest", 4883 + "serde", 4884 + "serde_json", 4885 + "sha256", 4886 + "sqlx", 4887 + "symphonia", 4888 + "tempfile", 4889 + "tokio", 4890 + "tokio-stream", 4891 + ] 4892 + 4893 + [[package]] 4894 + name = "rocksky-googledrive" 4895 + version = "0.1.0" 4896 + dependencies = [ 4897 + "actix-web", 4898 + "aes", 4899 + "anyhow", 4900 + "async-nats", 4901 + "chrono", 4902 + "clap", 4903 + "ctr", 4904 + "dotenv", 4905 + "futures", 4906 + "hex", 4907 + "jsonwebtoken", 4908 + "lofty", 4909 + "md5", 4910 + "owo-colors", 4911 + "redis 0.29.5", 4912 + "reqwest", 4913 + "serde", 4914 + "serde_json", 4915 + "serde_urlencoded", 4916 + "sha256", 4917 + "sqlx", 4918 + "symphonia", 4919 + "tempfile", 4920 + "tokio", 4921 + "tokio-stream", 4922 + ] 4923 + 4924 + [[package]] 4925 + name = "rocksky-jetstream" 4926 + version = "0.1.0" 4927 + dependencies = [ 4928 + "anyhow", 4929 + "async-nats", 4930 + "chrono", 4931 + "dotenv", 4932 + "futures-util", 4933 + "owo-colors", 4934 + 
"reqwest", 4935 + "serde", 4936 + "serde_json", 4937 + "sha256", 4938 + "sqlx", 4939 + "tokio", 4940 + "tokio-stream", 4941 + "tokio-tungstenite", 4942 + "tungstenite", 4943 + "url", 4944 + ] 4945 + 4946 + [[package]] 4947 + name = "rocksky-playlists" 4948 + version = "0.1.0" 4949 + dependencies = [ 4950 + "aes", 4951 + "anyhow", 4952 + "async-nats", 4953 + "chrono", 4954 + "clap", 4955 + "ctr", 4956 + "dotenv", 4957 + "duckdb", 4958 + "hex", 4959 + "jsonwebtoken", 4960 + "owo-colors", 4961 + "polars", 4962 + "reqwest", 4963 + "serde", 4964 + "serde_json", 4965 + "sha2", 4966 + "sqlx", 4967 + "tokio", 4968 + "tokio-stream", 4969 + ] 4970 + 4971 + [[package]] 4972 + name = "rocksky-scrobbler" 4973 + version = "0.1.0" 4974 + dependencies = [ 4975 + "actix-limitation", 4976 + "actix-session 0.10.1", 4977 + "actix-web", 4978 + "aes", 4979 + "anyhow", 4980 + "chrono", 4981 + "ctr", 4982 + "dotenv", 4983 + "hex", 4984 + "jsonwebtoken", 4985 + "md5", 4986 + "owo-colors", 4987 + "quick-xml 0.37.5", 4988 + "rand 0.9.2", 4989 + "redis 0.29.5", 4990 + "reqwest", 4991 + "serde", 4992 + "serde_json", 4993 + "sqlx", 4994 + "tokio", 4995 + "tokio-stream", 4996 + "uuid", 4997 + ] 4998 + 4999 + [[package]] 5000 + name = "rocksky-spotify" 5001 + version = "0.1.0" 5002 + dependencies = [ 5003 + "aes", 5004 + "anyhow", 5005 + "async-nats", 5006 + "chrono", 5007 + "ctr", 5008 + "dotenv", 5009 + "hex", 5010 + "jsonwebtoken", 5011 + "owo-colors", 5012 + "redis 0.29.5", 5013 + "reqwest", 5014 + "serde", 5015 + "serde_json", 5016 + "sqlx", 5017 + "tokio", 5018 + "tokio-stream", 5019 + ] 5020 + 5021 + [[package]] 5022 + name = "rocksky-storage" 5023 + version = "0.1.0" 5024 + dependencies = [ 5025 + "actix-web", 5026 + "anyhow", 5027 + "dotenv", 5028 + "owo-colors", 5029 + "rust-s3", 5030 + "serde", 5031 + "serde_json", 5032 + "sqlx", 5033 + "tokio", 5034 + "tokio-stream", 5035 + ] 5036 + 5037 + [[package]] 5038 + name = "rocksky-tracklist" 5039 + version = "0.1.0" 5040 + dependencies = [ 
5041 + "actix-web", 5042 + "anyhow", 5043 + "async-nats", 5044 + "clap", 5045 + "dotenv", 5046 + "owo-colors", 5047 + "polars", 5048 + "rand 0.9.2", 5049 + "redis 0.29.5", 5050 + "serde", 5051 + "serde_json", 5052 + "tokio", 5053 + "tokio-stream", 5054 + "uuid", 5055 + ] 5056 + 5057 + [[package]] 5058 + name = "rocksky-webscrobbler" 5059 + version = "0.1.0" 5060 + dependencies = [ 5061 + "actix-limitation", 5062 + "actix-session 0.10.1", 5063 + "actix-web", 5064 + "aes", 5065 + "anyhow", 5066 + "chrono", 5067 + "ctr", 5068 + "dotenv", 5069 + "hex", 5070 + "jsonwebtoken", 5071 + "md5", 5072 + "owo-colors", 5073 + "rand 0.9.2", 5074 + "redis 0.29.5", 5075 + "reqwest", 5076 + "serde", 5077 + "serde_json", 5078 + "sqlx", 5079 + "tokio", 5080 + "tokio-stream", 5081 + ] 5082 + 5083 + [[package]] 5084 + name = "rockskyd" 5085 + version = "0.1.0" 5086 + dependencies = [ 5087 + "anyhow", 5088 + "clap", 5089 + "dotenv", 5090 + "rocksky-analytics", 5091 + "rocksky-dropbox", 5092 + "rocksky-googledrive", 5093 + "rocksky-jetstream", 5094 + "rocksky-playlists", 5095 + "rocksky-scrobbler", 5096 + "rocksky-spotify", 5097 + "rocksky-tracklist", 5098 + "rocksky-webscrobbler", 5099 + "tokio", 5100 + ] 5101 + 5102 + [[package]] 4964 5103 name = "rsa" 4965 5104 version = "0.9.8" 4966 5105 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 5290 5429 checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" 5291 5430 5292 5431 [[package]] 5293 - name = "scrobbler" 5294 - version = "0.1.0" 5295 - dependencies = [ 5296 - "actix-limitation", 5297 - "actix-session 0.10.1", 5298 - "actix-web", 5299 - "aes", 5300 - "anyhow", 5301 - "chrono", 5302 - "ctr", 5303 - "dotenv", 5304 - "hex", 5305 - "jsonwebtoken", 5306 - "md5", 5307 - "owo-colors", 5308 - "quick-xml 0.37.5", 5309 - "rand 0.9.1", 5310 - "redis 0.29.5", 5311 - "reqwest", 5312 - "serde", 5313 - "serde_json", 5314 - "sqlx", 5315 - "tokio", 5316 - "tokio-stream", 5317 - "uuid", 5318 - ] 5319 - 5320 - 
[[package]] 5321 5432 name = "sct" 5322 5433 version = "0.7.1" 5323 5434 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 5650 5761 ] 5651 5762 5652 5763 [[package]] 5653 - name = "spotify" 5654 - version = "0.1.0" 5655 - dependencies = [ 5656 - "aes", 5657 - "anyhow", 5658 - "async-nats", 5659 - "chrono", 5660 - "ctr", 5661 - "dotenv", 5662 - "hex", 5663 - "jsonwebtoken", 5664 - "owo-colors", 5665 - "redis 0.29.5", 5666 - "reqwest", 5667 - "serde", 5668 - "serde_json", 5669 - "sqlx", 5670 - "tokio", 5671 - "tokio-stream", 5672 - ] 5673 - 5674 - [[package]] 5675 5764 name = "sptr" 5676 5765 version = "0.3.2" 5677 5766 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 5904 5993 version = "1.1.0" 5905 5994 source = "registry+https://github.com/rust-lang/crates.io-index" 5906 5995 checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" 5907 - 5908 - [[package]] 5909 - name = "storage" 5910 - version = "0.1.0" 5911 - dependencies = [ 5912 - "actix-web", 5913 - "anyhow", 5914 - "dotenv", 5915 - "owo-colors", 5916 - "rust-s3", 5917 - "serde", 5918 - "serde_json", 5919 - "sqlx", 5920 - "tokio", 5921 - "tokio-stream", 5922 - ] 5923 5996 5924 5997 [[package]] 5925 5998 name = "streaming-decompression" ··· 6478 6551 "pin-project-lite", 6479 6552 "postgres-protocol", 6480 6553 "postgres-types", 6481 - "rand 0.9.1", 6554 + "rand 0.9.2", 6482 6555 "socket2", 6483 6556 "tokio", 6484 6557 "tokio-util", ··· 6741 6814 "http 1.3.1", 6742 6815 "httparse", 6743 6816 "log", 6744 - "rand 0.9.1", 6817 + "rand 0.9.2", 6745 6818 "rustls 0.23.27", 6746 6819 "rustls-pki-types", 6747 6820 "sha1", ··· 6874 6947 6875 6948 [[package]] 6876 6949 name = "uuid" 6877 - version = "1.17.0" 6950 + version = "1.18.0" 6878 6951 source = "registry+https://github.com/rust-lang/crates.io-index" 6879 - checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" 6952 + checksum = 
"f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" 6880 6953 dependencies = [ 6881 6954 "getrandom 0.3.3", 6882 6955 "js-sys", ··· 7073 7146 checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" 7074 7147 dependencies = [ 7075 7148 "rustls-pki-types", 7076 - ] 7077 - 7078 - [[package]] 7079 - name = "webscrobbler" 7080 - version = "0.1.0" 7081 - dependencies = [ 7082 - "actix-limitation", 7083 - "actix-session 0.10.1", 7084 - "actix-web", 7085 - "aes", 7086 - "anyhow", 7087 - "chrono", 7088 - "ctr", 7089 - "dotenv", 7090 - "hex", 7091 - "jsonwebtoken", 7092 - "md5", 7093 - "owo-colors", 7094 - "rand 0.9.1", 7095 - "redis 0.29.5", 7096 - "reqwest", 7097 - "serde", 7098 - "serde_json", 7099 - "sqlx", 7100 - "tokio", 7101 - "tokio-stream", 7102 7149 ] 7103 7150 7104 7151 [[package]]
+4
apps/api/lexicons/player/addItemsToQueue.json
··· 24 24 "position": { 25 25 "type": "integer", 26 26 "description": "Position in the queue to insert the items at, defaults to the end if not specified" 27 + }, 28 + "shuffle": { 29 + "type": "boolean", 30 + "description": "Whether to shuffle the added items in the queue" 27 31 } 28 32 } 29 33 }
+9
apps/api/lexicons/player/playDirectory.json
··· 16 16 }, 17 17 "directoryId": { 18 18 "type": "string" 19 + }, 20 + "shuffle": { 21 + "type": "boolean" 22 + }, 23 + "recurse": { 24 + "type": "boolean" 25 + }, 26 + "position": { 27 + "type": "integer" 19 28 } 20 29 } 21 30 }
+4
apps/api/pkl/defs/player/addItemsToQueue.pkl
··· 24 24 type = "integer" 25 25 description = "Position in the queue to insert the items at, defaults to the end if not specified" 26 26 } 27 + ["shuffle"] = new BooleanType { 28 + type = "boolean" 29 + description = "Whether to shuffle the added items in the queue" 30 + } 27 31 } 28 32 } 29 33 }
+9
apps/api/pkl/defs/player/playDirectory.pkl
··· 16 16 ["directoryId"] = new StringType { 17 17 type = "string" 18 18 } 19 + ["shuffle"] = new BooleanType { 20 + type = "boolean" 21 + } 22 + ["recurse"] = new BooleanType { 23 + type = "boolean" 24 + } 25 + ["position"] = new IntegerType { 26 + type = "integer" 27 + } 19 28 } 20 29 } 21 30 }
+13
apps/api/src/lexicon/lexicons.ts
··· 1892 1892 description: 1893 1893 'Position in the queue to insert the items at, defaults to the end if not specified', 1894 1894 }, 1895 + shuffle: { 1896 + type: 'boolean', 1897 + description: 'Whether to shuffle the added items in the queue', 1898 + }, 1895 1899 }, 1896 1900 }, 1897 1901 }, ··· 2032 2036 }, 2033 2037 directoryId: { 2034 2038 type: 'string', 2039 + }, 2040 + shuffle: { 2041 + type: 'boolean', 2042 + }, 2043 + recurse: { 2044 + type: 'boolean', 2045 + }, 2046 + position: { 2047 + type: 'integer', 2035 2048 }, 2036 2049 }, 2037 2050 },
+2
apps/api/src/lexicon/types/app/rocksky/player/addItemsToQueue.ts
··· 13 13 items: string[] 14 14 /** Position in the queue to insert the items at, defaults to the end if not specified */ 15 15 position?: number 16 + /** Whether to shuffle the added items in the queue */ 17 + shuffle?: boolean 16 18 } 17 19 18 20 export type InputSchema = undefined
+3
apps/api/src/lexicon/types/app/rocksky/player/playDirectory.ts
··· 11 11 export interface QueryParams { 12 12 playerId?: string 13 13 directoryId: string 14 + shuffle?: boolean 15 + recurse?: boolean 16 + position?: number 14 17 } 15 18 16 19 export type InputSchema = undefined
+1 -1
crates/analytics/Cargo.toml
··· 1 1 [package] 2 - name = "analytics" 2 + name = "rocksky-analytics" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+2 -2
crates/analytics/src/handlers/albums.rs
··· 1 1 use std::sync::{Arc, Mutex}; 2 2 3 - use actix_web::{web, HttpRequest, HttpResponse}; 4 - use analytics::types::{ 3 + use crate::types::{ 5 4 album::{Album, GetAlbumTracksParams, GetAlbumsParams, GetTopAlbumsParams}, 6 5 track::Track, 7 6 }; 7 + use actix_web::{web, HttpRequest, HttpResponse}; 8 8 use anyhow::Error; 9 9 use duckdb::Connection; 10 10 use tokio_stream::StreamExt;
+2 -2
crates/analytics/src/handlers/artists.rs
··· 1 1 use std::sync::{Arc, Mutex}; 2 2 3 - use actix_web::{web, HttpRequest, HttpResponse}; 4 - use analytics::types::{ 3 + use crate::types::{ 5 4 album::Album, 6 5 artist::{ 7 6 Artist, GetArtistAlbumsParams, GetArtistTracksParams, GetArtistsParams, GetTopArtistsParams, 8 7 }, 9 8 track::Track, 10 9 }; 10 + use actix_web::{web, HttpRequest, HttpResponse}; 11 11 use anyhow::Error; 12 12 use duckdb::Connection; 13 13 use tokio_stream::StreamExt;
+1 -1
crates/analytics/src/handlers/scrobbles.rs
··· 1 1 use std::sync::{Arc, Mutex}; 2 2 3 + use crate::types::scrobble::{GetScrobblesParams, ScrobbleTrack}; 3 4 use actix_web::{web, HttpRequest, HttpResponse}; 4 - use analytics::types::scrobble::{GetScrobblesParams, ScrobbleTrack}; 5 5 use anyhow::Error; 6 6 use duckdb::Connection; 7 7 use tokio_stream::StreamExt;
+2 -2
crates/analytics/src/handlers/stats.rs
··· 1 1 use std::sync::{Arc, Mutex}; 2 2 3 3 use crate::read_payload; 4 - use actix_web::{web, HttpRequest, HttpResponse}; 5 - use analytics::types::{ 4 + use crate::types::{ 6 5 scrobble::{ScrobblesPerDay, ScrobblesPerMonth, ScrobblesPerYear}, 7 6 stats::{ 8 7 GetAlbumScrobblesParams, GetArtistScrobblesParams, GetScrobblesPerDayParams, ··· 10 9 GetTrackScrobblesParams, 11 10 }, 12 11 }; 12 + use actix_web::{web, HttpRequest, HttpResponse}; 13 13 use anyhow::Error; 14 14 use duckdb::Connection; 15 15 use serde_json::json;
+1 -1
crates/analytics/src/handlers/tracks.rs
··· 1 1 use std::sync::{Arc, Mutex}; 2 2 3 + use crate::types::track::{GetLovedTracksParams, GetTopTracksParams, GetTracksParams, Track}; 3 4 use actix_web::{web, HttpRequest, HttpResponse}; 4 - use analytics::types::track::{GetLovedTracksParams, GetTopTracksParams, GetTracksParams, Track}; 5 5 use anyhow::Error; 6 6 use duckdb::Connection; 7 7 use tokio_stream::StreamExt;
+42
crates/analytics/src/lib.rs
··· 1 + use std::{ 2 + env, 3 + sync::{Arc, Mutex}, 4 + }; 5 + 6 + use anyhow::Error; 7 + use duckdb::Connection; 8 + use sqlx::postgres::PgPoolOptions; 9 + 10 + use crate::core::create_tables; 11 + 12 + pub mod cmd; 13 + pub mod core; 14 + pub mod handlers; 15 + pub mod subscriber; 1 16 pub mod types; 2 17 pub mod xata; 18 + 19 + pub async fn serve() -> Result<(), Error> { 20 + let conn = Connection::open("./rocksky-analytics.ddb")?; 21 + 22 + create_tables(&conn).await?; 23 + 24 + let conn = Arc::new(Mutex::new(conn)); 25 + cmd::serve::serve(conn).await?; 26 + 27 + Ok(()) 28 + } 29 + 30 + pub async fn sync() -> Result<(), Error> { 31 + let pool = PgPoolOptions::new() 32 + .max_connections(5) 33 + .connect(&env::var("XATA_POSTGRES_URL")?) 34 + .await?; 35 + 36 + let conn = Connection::open("./rocksky-analytics.ddb")?; 37 + create_tables(&conn).await?; 38 + 39 + let conn = Arc::new(Mutex::new(conn)); 40 + 41 + cmd::sync::sync(conn, &pool).await?; 42 + 43 + Ok(()) 44 + }
+1 -1
crates/connect/Cargo.toml
··· 1 1 [package] 2 - name = "connect" 2 + name = "rocksky-connect" 3 3 version = "0.1.0" 4 4 authors = ["Tsiry Sandratraina <tsiry.sndr@rocksky.app>"] 5 5 edition = "2024"
+1 -1
crates/dropbox/Cargo.toml
··· 1 1 [package] 2 - name = "dropbox" 2 + name = "rocksky-dropbox" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+10
crates/dropbox/src/lib.rs
··· 1 + pub mod client; 2 + pub mod cmd; 3 + pub mod consts; 4 + pub mod crypto; 5 + pub mod handlers; 6 + pub mod repo; 7 + pub mod scan; 8 + pub mod token; 9 + pub mod types; 10 + pub mod xata;
+1 -1
crates/googledrive/Cargo.toml
··· 1 1 [package] 2 - name = "googledrive" 2 + name = "rocksky-googledrive" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+10
crates/googledrive/src/lib.rs
··· 1 + pub mod client; 2 + pub mod cmd; 3 + pub mod consts; 4 + pub mod crypto; 5 + pub mod handlers; 6 + pub mod repo; 7 + pub mod scan; 8 + pub mod token; 9 + pub mod types; 10 + pub mod xata;
+1 -1
crates/jetstream/Cargo.toml
··· 1 1 [package] 2 - name = "jetstream" 2 + name = "rocksky-jetstream" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+24
crates/jetstream/src/lib.rs
··· 1 + use anyhow::Error; 2 + use std::env; 3 + 4 + use subscriber::ScrobbleSubscriber; 5 + 6 + pub mod profile; 7 + pub mod repo; 8 + pub mod subscriber; 9 + pub mod types; 10 + pub mod xata; 11 + 12 + pub async fn subscribe() -> Result<(), Error> { 13 + let jetstream_server = env::var("JETSTREAM_SERVER") 14 + .unwrap_or_else(|_| "wss://jetstream2.us-west.bsky.network".to_string()); 15 + let url = format!( 16 + "{}/subscribe?wantedCollections=app.rocksky.*", 17 + jetstream_server 18 + ); 19 + let subscriber = ScrobbleSubscriber::new(&url); 20 + 21 + subscriber.run().await?; 22 + 23 + Ok(()) 24 + }
+1 -1
crates/jetstream/src/main.rs
··· 13 13 async fn main() -> Result<(), anyhow::Error> { 14 14 dotenv()?; 15 15 let jetstream_server = env::var("JETSTREAM_SERVER") 16 - .unwrap_or_else(|_| "wss://jetstream2.us-east.bsky.network".to_string()); 16 + .unwrap_or_else(|_| "wss://jetstream2.us-west.bsky.network".to_string()); 17 17 let url = format!( 18 18 "{}/subscribe?wantedCollections=app.rocksky.*", 19 19 jetstream_server
+1 -1
crates/playlists/Cargo.toml
··· 1 1 [package] 2 - name = "playlists" 2 + name = "rocksky-playlists" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+1 -1
crates/playlists/src/core.rs
··· 14 14 use crate::{ 15 15 crypto::{decrypt_aes_256_ctr, generate_token}, 16 16 types::{self, spotify_token::SpotifyTokenWithEmail}, 17 - xata::{self, track::Track}, 17 + xata::{self}, 18 18 }; 19 19 20 20 const ROCKSKY_API: &str = "https://api.rocksky.app";
+59
crates/playlists/src/lib.rs
··· 1 + use std::{ 2 + env, 3 + sync::{Arc, Mutex}, 4 + }; 5 + 6 + use anyhow::Error; 7 + use async_nats::connect; 8 + use duckdb::Connection; 9 + use owo_colors::OwoColorize; 10 + use sqlx::postgres::PgPoolOptions; 11 + 12 + use crate::{ 13 + core::{create_tables, find_spotify_users, load_users, save_playlists}, 14 + spotify::get_user_playlists, 15 + subscriber::subscribe, 16 + }; 17 + 1 18 pub mod core; 2 19 pub mod crypto; 3 20 pub mod spotify; 4 21 pub mod subscriber; 5 22 pub mod types; 6 23 pub mod xata; 24 + 25 + pub async fn start() -> Result<(), Error> { 26 + let conn = Connection::open("./rocksky-playlists.ddb")?; 27 + let conn = Arc::new(Mutex::new(conn)); 28 + create_tables(conn.clone())?; 29 + 30 + subscribe(conn.clone()).await?; 31 + 32 + let pool = PgPoolOptions::new() 33 + .max_connections(5) 34 + .connect(&env::var("XATA_POSTGRES_URL")?) 35 + .await?; 36 + let users = find_spotify_users(&pool, 0, 100).await?; 37 + 38 + load_users(conn.clone(), &pool).await?; 39 + 40 + sqlx::query(r#" 41 + CREATE UNIQUE INDEX IF NOT EXISTS user_playlists_unique_index ON user_playlists (user_id, playlist_id) 42 + "#) 43 + .execute(&pool) 44 + .await?; 45 + let conn = conn.clone(); 46 + 47 + let addr = env::var("NATS_URL").unwrap_or_else(|_| "nats://localhost:4222".to_string()); 48 + let nc = connect(&addr).await?; 49 + let nc = Arc::new(Mutex::new(nc)); 50 + println!("Connected to NATS server at {}", addr.bright_green()); 51 + 52 + for user in users { 53 + let token = user.1.clone(); 54 + let did = user.2.clone(); 55 + let user_id = user.3.clone(); 56 + let playlists = get_user_playlists(token).await?; 57 + save_playlists(&pool, conn.clone(), nc.clone(), playlists, &user_id, &did).await?; 58 + } 59 + 60 + println!("Done!"); 61 + 62 + loop { 63 + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; 64 + } 65 + }
+1 -1
crates/playlists/src/main.rs
··· 9 9 use dotenv::dotenv; 10 10 use duckdb::Connection; 11 11 use owo_colors::OwoColorize; 12 - use playlists::subscriber::subscribe; 12 + use rocksky_playlists::subscriber::subscribe; 13 13 use spotify::get_user_playlists; 14 14 use sqlx::postgres::PgPoolOptions; 15 15
+22
crates/rockskyd/Cargo.toml
··· 1 + [package] 2 + name = "rockskyd" 3 + version = "0.1.0" 4 + authors.workspace = true 5 + edition.workspace = true 6 + license.workspace = true 7 + repository.workspace = true 8 + 9 + [dependencies] 10 + clap = "4.5.31" 11 + tokio = { version = "1.43.0", features = ["full"] } 12 + dotenv = "0.15.0" 13 + anyhow = "1.0.96" 14 + rocksky-analytics = { path = "../analytics" } 15 + rocksky-dropbox = { path = "../dropbox" } 16 + rocksky-googledrive = { path = "../googledrive" } 17 + rocksky-jetstream = { path = "../jetstream" } 18 + rocksky-playlists = { path = "../playlists" } 19 + rocksky-scrobbler = { path = "../scrobbler" } 20 + rocksky-spotify = { path = "../spotify" } 21 + rocksky-tracklist = { path = "../tracklist" } 22 + rocksky-webscrobbler = { path = "../webscrobbler" }
+11
crates/rockskyd/src/cmd/analytics.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn sync() -> Result<(), Error> { 4 + rocksky_analytics::sync().await?; 5 + Ok(()) 6 + } 7 + 8 + pub async fn serve() -> Result<(), Error> { 9 + rocksky_analytics::serve().await?; 10 + Ok(()) 11 + }
+11
crates/rockskyd/src/cmd/dropbox.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn scan() -> Result<(), Error> { 4 + rocksky_dropbox::cmd::scan::scan().await?; 5 + Ok(()) 6 + } 7 + 8 + pub async fn serve() -> Result<(), Error> { 9 + rocksky_dropbox::cmd::serve::serve().await?; 10 + Ok(()) 11 + }
+11
crates/rockskyd/src/cmd/googledrive.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn scan() -> Result<(), Error> { 4 + rocksky_googledrive::cmd::scan::scan().await?; 5 + Ok(()) 6 + } 7 + 8 + pub async fn serve() -> Result<(), Error> { 9 + rocksky_googledrive::cmd::serve::serve().await?; 10 + Ok(()) 11 + }
+6
crates/rockskyd/src/cmd/jetstream.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn start_jetstream_service() -> Result<(), Error> { 4 + rocksky_jetstream::subscribe().await?; 5 + Ok(()) 6 + }
+9
crates/rockskyd/src/cmd/mod.rs
··· 1 + pub mod analytics; 2 + pub mod dropbox; 3 + pub mod googledrive; 4 + pub mod jetstream; 5 + pub mod playlist; 6 + pub mod scrobbler; 7 + pub mod spotify; 8 + pub mod tracklist; 9 + pub mod webscrobbler;
+6
crates/rockskyd/src/cmd/playlist.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn start_playlist_service() -> Result<(), Error> { 4 + rocksky_playlists::start().await?; 5 + Ok(()) 6 + }
+6
crates/rockskyd/src/cmd/scrobbler.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn start_scrobbler_service() -> Result<(), Error> { 4 + rocksky_scrobbler::run().await?; 5 + Ok(()) 6 + }
+6
crates/rockskyd/src/cmd/spotify.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn start_spotify_service() -> Result<(), Error> { 4 + rocksky_spotify::run().await?; 5 + Ok(()) 6 + }
+6
crates/rockskyd/src/cmd/tracklist.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn start_tracklist_service() -> Result<(), Error> { 4 + rocksky_tracklist::run().await?; 5 + Ok(()) 6 + }
+6
crates/rockskyd/src/cmd/webscrobbler.rs
··· 1 + use anyhow::Error; 2 + 3 + pub async fn start_webscrobbler_service() -> Result<(), Error> { 4 + rocksky_webscrobbler::start_server().await?; 5 + Ok(()) 6 + }
+82
crates/rockskyd/src/main.rs
··· 1 + use clap::Command; 2 + use dotenv::dotenv; 3 + 4 + pub mod cmd; 5 + 6 + fn cli() -> Command { 7 + Command::new("rockskyd") 8 + .version(env!("CARGO_PKG_VERSION")) 9 + .about("Rocksky Daemon Service") 10 + .subcommand( 11 + Command::new("analytics") 12 + .about("Analytics related commands") 13 + .subcommand(Command::new("sync").about("Sync data from Xata to DuckDB")) 14 + .subcommand(Command::new("serve").about("Serve the Rocksky Analytics API")), 15 + ) 16 + .subcommand( 17 + Command::new("dropbox") 18 + .about("Dropbox related commands") 19 + .subcommand(Command::new("scan").about("Scan Dropbox Music Folder")) 20 + .subcommand(Command::new("serve").about("Serve Rocksky Dropbox API")), 21 + ) 22 + .subcommand( 23 + Command::new("googledrive") 24 + .about("Google Drive related commands") 25 + .subcommand(Command::new("scan").about("Scan Google Drive Music Folder")) 26 + .subcommand(Command::new("serve").about("Serve Rocksky Google Drive API")), 27 + ) 28 + .subcommand(Command::new("jetstream").about("Start JetStream Subscriber Service")) 29 + .subcommand(Command::new("playlist").about("Playlist related commands")) 30 + .subcommand(Command::new("scrobbler").about("Start Scrobbler API")) 31 + .subcommand(Command::new("spotify").about("Start Spotify Listener Service")) 32 + .subcommand(Command::new("tracklist").about("Start User Current Track Queue Service")) 33 + .subcommand(Command::new("webscrobbler").about("Start Webscrobbler API")) 34 + } 35 + 36 + #[tokio::main] 37 + async fn main() -> Result<(), Box<dyn std::error::Error>> { 38 + dotenv().ok(); 39 + 40 + let args = cli().get_matches(); 41 + 42 + match args.subcommand() { 43 + Some(("analytics", sub_m)) => match sub_m.subcommand() { 44 + Some(("sync", _)) => cmd::analytics::sync().await?, 45 + Some(("serve", _)) => cmd::analytics::serve().await?, 46 + _ => println!("Unknown analytics command"), 47 + }, 48 + Some(("dropbox", sub_m)) => match sub_m.subcommand() { 49 + Some(("scan", _)) => 
cmd::dropbox::scan().await?, 50 + Some(("serve", _)) => cmd::dropbox::serve().await?, 51 + _ => println!("Unknown dropbox command"), 52 + }, 53 + Some(("googledrive", sub_m)) => match sub_m.subcommand() { 54 + Some(("scan", _)) => cmd::googledrive::scan().await?, 55 + Some(("serve", _)) => cmd::googledrive::serve().await?, 56 + _ => println!("Unknown googledrive command"), 57 + }, 58 + Some(("jetstream", _)) => { 59 + cmd::jetstream::start_jetstream_service().await?; 60 + } 61 + Some(("playlist", _)) => { 62 + cmd::playlist::start_playlist_service().await?; 63 + } 64 + Some(("scrobbler", _)) => { 65 + cmd::scrobbler::start_scrobbler_service().await?; 66 + } 67 + Some(("spotify", _)) => { 68 + cmd::spotify::start_spotify_service().await?; 69 + } 70 + Some(("tracklist", _)) => { 71 + cmd::tracklist::start_tracklist_service().await?; 72 + } 73 + Some(("webscrobbler", _)) => { 74 + cmd::webscrobbler::start_webscrobbler_service().await?; 75 + } 76 + _ => { 77 + println!("No valid subcommand was used. Use --help to see available commands."); 78 + } 79 + } 80 + 81 + Ok(()) 82 + }
+1 -1
crates/scrobbler/Cargo.toml
··· 1 1 [package] 2 - name = "scrobbler" 2 + name = "rocksky-scrobbler" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+103
crates/scrobbler/src/lib.rs
··· 1 + pub mod auth; 2 + pub mod cache; 3 + pub mod crypto; 4 + pub mod handlers; 5 + pub mod listenbrainz; 6 + pub mod musicbrainz; 7 + pub mod params; 8 + pub mod repo; 9 + pub mod response; 10 + pub mod rocksky; 11 + pub mod scrobbler; 12 + pub mod signature; 13 + pub mod spotify; 14 + pub mod types; 15 + pub mod xata; 16 + 17 + use std::{env, sync::Arc, time::Duration}; 18 + 19 + use actix_limitation::{Limiter, RateLimiter}; 20 + use actix_session::SessionExt; 21 + use actix_web::{ 22 + dev::ServiceRequest, 23 + web::{self, Data}, 24 + App, HttpServer, 25 + }; 26 + use anyhow::Error; 27 + use owo_colors::OwoColorize; 28 + use sqlx::postgres::PgPoolOptions; 29 + 30 + use crate::cache::Cache; 31 + 32 + pub const BANNER: &str = r#" 33 + ___ ___ _____ __ __ __ 34 + / | __ ______/ (_)___ / ___/______________ / /_ / /_ / /__ _____ 35 + / /| |/ / / / __ / / __ \ \__ \/ ___/ ___/ __ \/ __ \/ __ \/ / _ \/ ___/ 36 + / ___ / /_/ / /_/ / / /_/ / ___/ / /__/ / / /_/ / /_/ / /_/ / / __/ / 37 + /_/ |_\__,_/\__,_/_/\____/ /____/\___/_/ \____/_.___/_.___/_/\___/_/ 38 + 39 + This is the Rocksky Scrobbler API compatible with Last.fm AudioScrobbler API 40 + "#; 41 + 42 + pub async fn run() -> Result<(), Error> { 43 + println!("{}", BANNER.magenta()); 44 + 45 + let cache = Cache::new()?; 46 + 47 + let pool = PgPoolOptions::new() 48 + .max_connections(5) 49 + .connect(&env::var("XATA_POSTGRES_URL")?) 
50 + .await?; 51 + let conn = Arc::new(pool); 52 + 53 + let host = env::var("SCROBBLE_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); 54 + let port = env::var("SCROBBLE_PORT") 55 + .unwrap_or_else(|_| "7882".to_string()) 56 + .parse::<u16>() 57 + .unwrap_or(7882); 58 + 59 + println!( 60 + "Starting Scrobble server @ {}", 61 + format!("{}:{}", host, port).green() 62 + ); 63 + 64 + let limiter = web::Data::new( 65 + Limiter::builder("redis://127.0.0.1") 66 + .key_by(|req: &ServiceRequest| { 67 + req.get_session() 68 + .get(&"session-id") 69 + .unwrap_or_else(|_| req.cookie(&"rate-api-id").map(|c| c.to_string())) 70 + }) 71 + .limit(100) 72 + .period(Duration::from_secs(60)) // 100 requests per 60-second window 73 + .build() 74 + .unwrap(), 75 + ); 76 + 77 + HttpServer::new(move || { 78 + App::new() 79 + .wrap(RateLimiter::default()) 80 + .app_data(limiter.clone()) 81 + .app_data(Data::new(conn.clone())) 82 + .app_data(Data::new(cache.clone())) 83 + .service(handlers::handle_methods) 84 + .service(handlers::handle_nowplaying) 85 + .service(handlers::handle_submission) 86 + .service(listenbrainz::handlers::handle_submit_listens) 87 + .service(listenbrainz::handlers::handle_validate_token) 88 + .service(listenbrainz::handlers::handle_search_users) 89 + .service(listenbrainz::handlers::handle_get_playing_now) 90 + .service(listenbrainz::handlers::handle_get_listens) 91 + .service(listenbrainz::handlers::handle_get_listen_count) 92 + .service(listenbrainz::handlers::handle_get_artists) 93 + .service(listenbrainz::handlers::handle_get_recordings) 94 + .service(listenbrainz::handlers::handle_get_release_groups) 95 + .service(handlers::index) 96 + .service(handlers::handle_get) 97 + }) 98 + .bind((host, port))? 99 + .run() 100 + .await?; 101 + 102 + Ok(()) 103 + }
+1 -1
crates/spotify/Cargo.toml
··· 1 1 [package] 2 - name = "spotify" 2 + name = "rocksky-spotify" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+897
crates/spotify/src/lib.rs
··· 1 + use std::{ 2 + collections::HashMap, 3 + env, 4 + sync::{atomic::AtomicBool, Arc, Mutex}, 5 + thread, 6 + }; 7 + 8 + use anyhow::Error; 9 + use async_nats::connect; 10 + use owo_colors::OwoColorize; 11 + use reqwest::Client; 12 + use sqlx::{postgres::PgPoolOptions, Pool, Postgres}; 13 + use tokio_stream::StreamExt; 14 + 15 + use crate::{ 16 + cache::Cache, 17 + crypto::decrypt_aes_256_ctr, 18 + rocksky::{scrobble, update_library}, 19 + types::{ 20 + album_tracks::AlbumTracks, 21 + currently_playing::{Album, Artist, CurrentlyPlaying}, 22 + spotify_token::SpotifyTokenWithEmail, 23 + token::AccessToken, 24 + }, 25 + }; 26 + 27 + pub mod cache; 28 + pub mod crypto; 29 + pub mod rocksky; 30 + pub mod token; 31 + pub mod types; 32 + 33 + pub const BASE_URL: &str = "https://spotify-api.rocksky.app/v1"; 34 + 35 + pub async fn run() -> Result<(), Error> { 36 + let cache = Cache::new()?; 37 + let pool = PgPoolOptions::new() 38 + .max_connections(5) 39 + .connect(&env::var("XATA_POSTGRES_URL")?) 
40 + .await?; 41 + 42 + let addr = env::var("NATS_URL").unwrap_or_else(|_| "nats://localhost:4222".to_string()); 43 + let nc = connect(&addr).await?; 44 + println!("Connected to NATS server at {}", addr.bright_green()); 45 + 46 + let mut sub = nc.subscribe("rocksky.spotify.user".to_string()).await?; 47 + println!("Subscribed to {}", "rocksky.spotify.user".bright_green()); 48 + 49 + let users = find_spotify_users(&pool, 0, 100).await?; 50 + println!("Found {} users", users.len().bright_green()); 51 + 52 + // Shared HashMap to manage threads and their stop flags 53 + let thread_map: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>> = 54 + Arc::new(Mutex::new(HashMap::new())); 55 + 56 + // Start threads for all users 57 + for user in users { 58 + let email = user.0.clone(); 59 + let token = user.1.clone(); 60 + let did = user.2.clone(); 61 + let stop_flag = Arc::new(AtomicBool::new(false)); 62 + let cache = cache.clone(); 63 + let nc = nc.clone(); 64 + let thread_map = Arc::clone(&thread_map); 65 + 66 + thread_map 67 + .lock() 68 + .unwrap() 69 + .insert(email.clone(), Arc::clone(&stop_flag)); 70 + 71 + thread::spawn(move || { 72 + let rt = tokio::runtime::Runtime::new().unwrap(); 73 + match rt.block_on(async { 74 + watch_currently_playing(email.clone(), token, did, stop_flag, cache.clone()) 75 + .await?; 76 + Ok::<(), Error>(()) 77 + }) { 78 + Ok(_) => {} 79 + Err(e) => { 80 + println!( 81 + "{} Error starting thread for user: {} - {}", 82 + format!("[{}]", email).bright_green(), 83 + email.bright_green(), 84 + e.to_string().bright_red() 85 + ); 86 + 87 + // If there's an error, publish a message to restart the thread 88 + match rt.block_on(nc.publish("rocksky.spotify.user", email.clone().into())) { 89 + Ok(_) => { 90 + println!( 91 + "{} Published message to restart thread for user: {}", 92 + format!("[{}]", email).bright_green(), 93 + email.bright_green() 94 + ); 95 + } 96 + Err(e) => { 97 + println!( 98 + "{} Error publishing message to restart thread: {}", 99 + 
format!("[{}]", email).bright_green(), 100 + e.to_string().bright_red() 101 + ); 102 + } 103 + } 104 + } 105 + } 106 + }); 107 + } 108 + 109 + // Handle subscription messages 110 + while let Some(message) = sub.next().await { 111 + let user_id = String::from_utf8(message.payload.to_vec()).unwrap(); 112 + println!( 113 + "Received message to restart thread for user: {}", 114 + user_id.bright_green() 115 + ); 116 + 117 + let mut thread_map = thread_map.lock().unwrap(); 118 + 119 + // Check if the user exists in the thread map 120 + if let Some(stop_flag) = thread_map.get(&user_id) { 121 + // Stop the existing thread 122 + stop_flag.store(true, std::sync::atomic::Ordering::Relaxed); 123 + 124 + // Create a new stop flag and restart the thread 125 + let new_stop_flag = Arc::new(AtomicBool::new(false)); 126 + thread_map.insert(user_id.clone(), Arc::clone(&new_stop_flag)); 127 + 128 + let user = find_spotify_user(&pool, &user_id).await?; 129 + 130 + if user.is_none() { 131 + println!( 132 + "Spotify user not found: {}, skipping", 133 + user_id.bright_green() 134 + ); 135 + continue; 136 + } 137 + 138 + let user = user.unwrap(); 139 + 140 + let email = user.0.clone(); 141 + let token = user.1.clone(); 142 + let did = user.2.clone(); 143 + let cache = cache.clone(); 144 + 145 + thread::spawn(move || { 146 + let rt = tokio::runtime::Runtime::new().unwrap(); 147 + match rt.block_on(async { 148 + watch_currently_playing( 149 + email.clone(), 150 + token, 151 + did, 152 + new_stop_flag, 153 + cache.clone(), 154 + ) 155 + .await?; 156 + Ok::<(), Error>(()) 157 + }) { 158 + Ok(_) => {} 159 + Err(e) => { 160 + println!( 161 + "{} Error restarting thread for user: {} - {}", 162 + format!("[{}]", email).bright_green(), 163 + email.bright_green(), 164 + e.to_string().bright_red() 165 + ); 166 + } 167 + } 168 + }); 169 + 170 + println!("Restarted thread for user: {}", user_id.bright_green()); 171 + } else { 172 + println!( 173 + "No thread found for user: {}, starting new thread", 
174 + user_id.bright_green() 175 + ); 176 + let user = find_spotify_user(&pool, &user_id).await?; 177 + if let Some(user) = user { 178 + let email = user.0.clone(); 179 + let token = user.1.clone(); 180 + let did = user.2.clone(); 181 + let stop_flag = Arc::new(AtomicBool::new(false)); 182 + let cache = cache.clone(); 183 + let nc = nc.clone(); 184 + 185 + thread_map.insert(email.clone(), Arc::clone(&stop_flag)); 186 + 187 + thread::spawn(move || { 188 + let rt = tokio::runtime::Runtime::new().unwrap(); 189 + match rt.block_on(async { 190 + watch_currently_playing( 191 + email.clone(), 192 + token, 193 + did, 194 + stop_flag, 195 + cache.clone(), 196 + ) 197 + .await?; 198 + Ok::<(), Error>(()) 199 + }) { 200 + Ok(_) => {} 201 + Err(e) => { 202 + println!( 203 + "{} Error starting thread for user: {} - {}", 204 + format!("[{}]", email).bright_green(), 205 + email.bright_green(), 206 + e.to_string().bright_red() 207 + ); 208 + match rt 209 + .block_on(nc.publish("rocksky.spotify.user", email.clone().into())) 210 + { 211 + Ok(_) => {} 212 + Err(e) => { 213 + println!( 214 + "{} Error publishing message to restart thread: {}", 215 + format!("[{}]", email).bright_green(), 216 + e.to_string().bright_red() 217 + ); 218 + } 219 + } 220 + } 221 + } 222 + }); 223 + } 224 + } 225 + } 226 + 227 + Ok(()) 228 + } 229 + 230 + pub async fn refresh_token(token: &str) -> Result<AccessToken, Error> { 231 + if env::var("SPOTIFY_CLIENT_ID").is_err() || env::var("SPOTIFY_CLIENT_SECRET").is_err() { 232 + panic!("Please set SPOTIFY_CLIENT_ID and SPOTIFY_CLIENT_SECRET environment variables"); 233 + } 234 + 235 + let client_id = env::var("SPOTIFY_CLIENT_ID")?; 236 + let client_secret = env::var("SPOTIFY_CLIENT_SECRET")?; 237 + 238 + let client = Client::new(); 239 + 240 + let response = client 241 + .post("https://accounts.spotify.com/api/token") 242 + .basic_auth(&client_id, Some(client_secret)) 243 + .form(&[ 244 + ("grant_type", "refresh_token"), 245 + ("refresh_token", token), 246 + 
("client_id", &client_id), 247 + ]) 248 + .send() 249 + .await?; 250 + let token = response.json::<AccessToken>().await?; 251 + Ok(token) 252 + } 253 + 254 + pub async fn get_currently_playing( 255 + cache: Cache, 256 + user_id: &str, 257 + token: &str, 258 + ) -> Result<Option<(CurrentlyPlaying, bool)>, Error> { 259 + if let Ok(Some(data)) = cache.get(user_id) { 260 + println!( 261 + "{} {}", 262 + format!("[{}]", user_id).bright_green(), 263 + "Using cache".cyan() 264 + ); 265 + if data == "No content" { 266 + return Ok(None); 267 + } 268 + let decoded_data = serde_json::from_str::<CurrentlyPlaying>(&data); 269 + 270 + if decoded_data.is_err() { 271 + println!( 272 + "{} {} {}", 273 + format!("[{}]", user_id).bright_green(), 274 + "Cache is invalid".red(), 275 + data 276 + ); 277 + cache.setex(user_id, "No content", 10)?; 278 + cache.del(&format!("{}:current", user_id))?; 279 + return Ok(None); 280 + } 281 + 282 + let data: CurrentlyPlaying = decoded_data.unwrap(); 283 + // detect if the song has changed 284 + let previous = cache.get(&format!("{}:previous", user_id)); 285 + 286 + if previous.is_err() { 287 + println!( 288 + "{} redis error: {}", 289 + format!("[{}]", user_id).bright_green(), 290 + previous.unwrap_err().to_string().bright_red() 291 + ); 292 + return Ok(None); 293 + } 294 + 295 + let previous = previous.unwrap(); 296 + 297 + let changed = match previous { 298 + Some(previous) => { 299 + if serde_json::from_str::<CurrentlyPlaying>(&previous).is_err() { 300 + println!( 301 + "{} {} {}", 302 + format!("[{}]", user_id).bright_green(), 303 + "Previous cache is invalid", 304 + previous 305 + ); 306 + return Ok(None); 307 + } 308 + 309 + let previous: CurrentlyPlaying = serde_json::from_str(&previous)?; 310 + if previous.item.is_none() && data.item.is_some() { 311 + return Ok(Some((data, true))); 312 + } 313 + 314 + if previous.item.is_some() && data.item.is_none() { 315 + return Ok(Some((data, false))); 316 + } 317 + 318 + if previous.item.is_none() && 
data.item.is_none() { 319 + return Ok(Some((data, false))); 320 + } 321 + 322 + let previous_item = previous.item.unwrap(); 323 + let data_item = data.clone().item.unwrap(); 324 + previous_item.id != data_item.id 325 + && previous.progress_ms.unwrap_or(0) != data.progress_ms.unwrap_or(0) 326 + } 327 + _ => true, 328 + }; 329 + return Ok(Some((data, changed))); 330 + } 331 + 332 + let token = refresh_token(token).await?; 333 + let client = Client::new(); 334 + let response = client 335 + .get(format!("{}/me/player/currently-playing", BASE_URL)) 336 + .bearer_auth(token.access_token) 337 + .send() 338 + .await?; 339 + 340 + let headers = response.headers().clone(); 341 + let status = response.status().as_u16(); 342 + let data = response.text().await?; 343 + 344 + if status == 429 { 345 + println!( 346 + "{} Too many requests, retry-after {}", 347 + format!("[{}]", user_id).bright_green(), 348 + headers 349 + .get("retry-after") 350 + .unwrap() 351 + .to_str() 352 + .unwrap() 353 + .bright_green() 354 + ); 355 + return Ok(None); 356 + } 357 + 358 + let previous = cache.get(&format!("{}:previous", user_id)); 359 + if previous.is_err() { 360 + println!( 361 + "{} redis error: {}", 362 + format!("[{}]", user_id).bright_green(), 363 + previous.unwrap_err().to_string().bright_red() 364 + ); 365 + return Ok(None); 366 + } 367 + 368 + let previous = previous.unwrap(); 369 + 370 + // check if status code is 204 371 + if status == 204 { 372 + println!("No content"); 373 + match cache.setex( 374 + user_id, 375 + "No content", 376 + match previous.is_none() { 377 + true => 30, 378 + false => 10, 379 + }, 380 + ) { 381 + Ok(_) => {} 382 + Err(e) => { 383 + println!( 384 + "{} redis error: {}", 385 + format!("[{}]", user_id).bright_green(), 386 + e.to_string().bright_red() 387 + ); 388 + return Ok(None); 389 + } 390 + } 391 + match cache.del(&format!("{}:current", user_id)) { 392 + Ok(_) => {} 393 + Err(e) => { 394 + println!( 395 + "{} redis error: {}", 396 + format!("[{}]", 
user_id).bright_green(), 397 + e.to_string().bright_red() 398 + ); 399 + return Ok(None); 400 + } 401 + } 402 + return Ok(None); 403 + } 404 + 405 + if serde_json::from_str::<CurrentlyPlaying>(&data).is_err() { 406 + println!( 407 + "{} {} {}", 408 + format!("[{}]", user_id).bright_green(), 409 + "Invalid data received".red(), 410 + data 411 + ); 412 + match cache.setex(user_id, "No content", 10) { 413 + Ok(_) => {} 414 + Err(e) => { 415 + println!( 416 + "{} redis error: {}", 417 + format!("[{}]", user_id).bright_green(), 418 + e.to_string().bright_red() 419 + ); 420 + return Ok(None); 421 + } 422 + } 423 + match cache.del(&format!("{}:current", user_id)) { 424 + Ok(_) => {} 425 + Err(e) => { 426 + println!( 427 + "{} redis error: {}", 428 + format!("[{}]", user_id).bright_green(), 429 + e.to_string().bright_red() 430 + ); 431 + return Ok(None); 432 + } 433 + } 434 + return Ok(None); 435 + } 436 + 437 + let data = serde_json::from_str::<CurrentlyPlaying>(&data)?; 438 + 439 + match cache.setex( 440 + user_id, 441 + &serde_json::to_string(&data)?, 442 + match previous.is_none() { 443 + true => 30, 444 + false => 15, 445 + }, 446 + ) { 447 + Ok(_) => {} 448 + Err(e) => { 449 + println!( 450 + "{} redis error: {}", 451 + format!("[{}]", user_id).bright_green(), 452 + e.to_string().bright_red() 453 + ); 454 + return Ok(None); 455 + } 456 + } 457 + match cache.del(&format!("{}:current", user_id)) { 458 + Ok(_) => {} 459 + Err(e) => { 460 + println!( 461 + "{} redis error: {}", 462 + format!("[{}]", user_id).bright_green(), 463 + e.to_string().bright_red() 464 + ); 465 + return Ok(None); 466 + } 467 + } 468 + 469 + // detect if the song has changed 470 + let previous = cache.get(&format!("{}:previous", user_id)); 471 + 472 + if previous.is_err() { 473 + println!( 474 + "{} redis error: {}", 475 + format!("[{}]", user_id).bright_green(), 476 + previous.unwrap_err().to_string().bright_red() 477 + ); 478 + return Ok(None); 479 + } 480 + 481 + let previous = 
previous.unwrap(); 482 + let changed = match previous { 483 + Some(previous) => { 484 + if serde_json::from_str::<CurrentlyPlaying>(&previous).is_err() { 485 + println!( 486 + "{} {} {}", 487 + format!("[{}]", user_id).bright_green(), 488 + "Previous cache is invalid", 489 + previous 490 + ); 491 + return Ok(None); 492 + } 493 + 494 + let previous: CurrentlyPlaying = serde_json::from_str(&previous)?; 495 + if previous.item.is_none() || data.item.is_none() { 496 + return Ok(Some((data, false))); 497 + } 498 + 499 + let previous_item = previous.item.unwrap(); 500 + let data_item = data.clone().item.unwrap(); 501 + 502 + previous_item.id != data_item.id 503 + && previous.progress_ms.unwrap_or(0) != data.progress_ms.unwrap_or(0) 504 + } 505 + _ => false, 506 + }; 507 + 508 + // save as previous song 509 + match cache.setex( 510 + &format!("{}:previous", user_id), 511 + &serde_json::to_string(&data)?, 512 + 600, 513 + ) { 514 + Ok(_) => {} 515 + Err(e) => { 516 + println!( 517 + "{} redis error: {}", 518 + format!("[{}]", user_id).bright_green(), 519 + e.to_string().bright_red() 520 + ); 521 + return Ok(None); 522 + } 523 + } 524 + 525 + Ok(Some((data, changed))) 526 + } 527 + 528 + pub async fn get_artist( 529 + cache: Cache, 530 + artist_id: &str, 531 + token: &str, 532 + ) -> Result<Option<Artist>, Error> { 533 + if let Ok(Some(data)) = cache.get(artist_id) { 534 + return Ok(Some(serde_json::from_str(&data)?)); 535 + } 536 + 537 + let token = refresh_token(token).await?; 538 + let client = Client::new(); 539 + let response = client 540 + .get(&format!("{}/artists/{}", BASE_URL, artist_id)) 541 + .bearer_auth(token.access_token) 542 + .send() 543 + .await?; 544 + 545 + let headers = response.headers().clone(); 546 + let data = response.text().await?; 547 + 548 + if data == "Too many requests" { 549 + println!( 550 + "> retry-after {}", 551 + headers.get("retry-after").unwrap().to_str().unwrap() 552 + ); 553 + println!("> {} [get_artist]", data); 554 + return Ok(None); 
555 + } 556 + 557 + match cache.setex(artist_id, &data, 20) { 558 + Ok(_) => {} 559 + Err(e) => { 560 + println!( 561 + "{} redis error: {}", 562 + format!("[{}]", artist_id).bright_green(), 563 + e.to_string().bright_red() 564 + ); 565 + return Ok(None); 566 + } 567 + } 568 + 569 + Ok(Some(serde_json::from_str(&data)?)) 570 + } 571 + 572 + pub async fn get_album(cache: Cache, album_id: &str, token: &str) -> Result<Option<Album>, Error> { 573 + if let Ok(Some(data)) = cache.get(album_id) { 574 + return Ok(Some(serde_json::from_str(&data)?)); 575 + } 576 + 577 + let token = refresh_token(token).await?; 578 + let client = Client::new(); 579 + let response = client 580 + .get(&format!("{}/albums/{}", BASE_URL, album_id)) 581 + .bearer_auth(token.access_token) 582 + .send() 583 + .await?; 584 + 585 + let headers = response.headers().clone(); 586 + let data = response.text().await?; 587 + 588 + if data == "Too many requests" { 589 + println!( 590 + "> retry-after {}", 591 + headers.get("retry-after").unwrap().to_str().unwrap() 592 + ); 593 + println!("> {} [get_album]", data); 594 + return Ok(None); 595 + } 596 + 597 + match cache.setex(album_id, &data, 20) { 598 + Ok(_) => {} 599 + Err(e) => { 600 + println!( 601 + "{} redis error: {}", 602 + format!("[{}]", album_id).bright_green(), 603 + e.to_string().bright_red() 604 + ); 605 + return Ok(None); 606 + } 607 + } 608 + 609 + Ok(Some(serde_json::from_str(&data)?)) 610 + } 611 + 612 + pub async fn get_album_tracks( 613 + cache: Cache, 614 + album_id: &str, 615 + token: &str, 616 + ) -> Result<AlbumTracks, Error> { 617 + if let Ok(Some(data)) = cache.get(&format!("{}:tracks", album_id)) { 618 + return Ok(serde_json::from_str(&data)?); 619 + } 620 + 621 + let token = refresh_token(token).await?; 622 + let client = Client::new(); 623 + let mut all_tracks = Vec::new(); 624 + let mut offset = 0; 625 + let limit = 50; 626 + 627 + loop { 628 + let response = client 629 + .get(&format!("{}/albums/{}/tracks", BASE_URL, album_id)) 
630 + .bearer_auth(&token.access_token) 631 + .query(&[ 632 + ("limit", &limit.to_string()), 633 + ("offset", &offset.to_string()), 634 + ]) 635 + .send() 636 + .await?; 637 + 638 + let headers = response.headers().clone(); 639 + let data = response.text().await?; 640 + if data == "Too many requests" { 641 + println!( 642 + "> retry-after {}", 643 + headers.get("retry-after").unwrap().to_str().unwrap() 644 + ); 645 + println!("> {} [get_album_tracks]", data); 646 + continue; 647 + } 648 + 649 + let album_tracks: AlbumTracks = serde_json::from_str(&data)?; 650 + 651 + if album_tracks.items.is_empty() { 652 + break; 653 + } 654 + 655 + all_tracks.extend(album_tracks.items); 656 + offset += limit; 657 + } 658 + 659 + let all_tracks_json = serde_json::to_string(&all_tracks)?; 660 + match cache.setex(&format!("{}:tracks", album_id), &all_tracks_json, 20) { 661 + Ok(_) => {} 662 + Err(e) => { 663 + println!( 664 + "{} redis error: {}", 665 + format!("[{}]", album_id).bright_green(), 666 + e.to_string().bright_red() 667 + ); 668 + } 669 + } 670 + 671 + Ok(AlbumTracks { 672 + items: all_tracks, 673 + ..Default::default() 674 + }) 675 + } 676 + 677 + pub async fn find_spotify_users( 678 + pool: &Pool<Postgres>, 679 + offset: usize, 680 + limit: usize, 681 + ) -> Result<Vec<(String, String, String, String)>, Error> { 682 + let results: Vec<SpotifyTokenWithEmail> = sqlx::query_as( 683 + r#" 684 + SELECT * FROM spotify_tokens 685 + LEFT JOIN spotify_accounts ON spotify_tokens.user_id = spotify_accounts.user_id 686 + LEFT JOIN users ON spotify_accounts.user_id = users.xata_id 687 + LIMIT $1 OFFSET $2 688 + "#, 689 + ) 690 + .bind(limit as i64) 691 + .bind(offset as i64) 692 + .fetch_all(pool) 693 + .await?; 694 + 695 + let mut user_tokens = vec![]; 696 + 697 + for result in &results { 698 + let token = decrypt_aes_256_ctr( 699 + &result.refresh_token, 700 + &hex::decode(env::var("SPOTIFY_ENCRYPTION_KEY")?)?, 701 + )?; 702 + user_tokens.push(( 703 + result.email.clone(), 704 + 
token, 705 + result.did.clone(), 706 + result.user_id.clone(), 707 + )); 708 + } 709 + 710 + Ok(user_tokens) 711 + } 712 + 713 + pub async fn find_spotify_user( 714 + pool: &Pool<Postgres>, 715 + email: &str, 716 + ) -> Result<Option<(String, String, String)>, Error> { 717 + let result: Vec<SpotifyTokenWithEmail> = sqlx::query_as( 718 + r#" 719 + SELECT * FROM spotify_tokens 720 + LEFT JOIN spotify_accounts ON spotify_tokens.user_id = spotify_accounts.user_id 721 + LEFT JOIN users ON spotify_accounts.user_id = users.xata_id 722 + WHERE spotify_accounts.email = $1 723 + "#, 724 + ) 725 + .bind(email) 726 + .fetch_all(pool) 727 + .await?; 728 + 729 + match result.first() { 730 + Some(result) => { 731 + let token = decrypt_aes_256_ctr( 732 + &result.refresh_token, 733 + &hex::decode(env::var("SPOTIFY_ENCRYPTION_KEY")?)?, 734 + )?; 735 + Ok(Some((result.email.clone(), token, result.did.clone()))) 736 + } 737 + None => Ok(None), 738 + } 739 + } 740 + 741 + pub async fn watch_currently_playing( 742 + spotify_email: String, 743 + token: String, 744 + did: String, 745 + stop_flag: Arc<AtomicBool>, 746 + cache: Cache, 747 + ) -> Result<(), Error> { 748 + println!( 749 + "{} {}", 750 + format!("[{}]", spotify_email).bright_green(), 751 + "Checking currently playing".cyan() 752 + ); 753 + 754 + let stop_flag_clone = stop_flag.clone(); 755 + let spotify_email_clone = spotify_email.clone(); 756 + let cache_clone = cache.clone(); 757 + thread::spawn(move || { 758 + loop { 759 + if stop_flag_clone.load(std::sync::atomic::Ordering::Relaxed) { 760 + println!( 761 + "{} Stopping Thread", 762 + format!("[{}]", spotify_email_clone).bright_green() 763 + ); 764 + break; 765 + } 766 + if let Ok(Some(cached)) = cache_clone.get(&format!("{}:current", spotify_email_clone)) { 767 + if serde_json::from_str::<CurrentlyPlaying>(&cached).is_err() { 768 + thread::sleep(std::time::Duration::from_millis(800)); 769 + continue; 770 + } 771 + 772 + let mut current_song = 
serde_json::from_str::<CurrentlyPlaying>(&cached)?; 773 + 774 + if let Some(item) = current_song.item.clone() { 775 + if current_song.is_playing 776 + && current_song.progress_ms.unwrap_or(0) < item.duration_ms.into() 777 + { 778 + current_song.progress_ms = 779 + Some(current_song.progress_ms.unwrap_or(0) + 800); 780 + match cache_clone.setex( 781 + &format!("{}:current", spotify_email_clone), 782 + &serde_json::to_string(&current_song)?, 783 + 16, 784 + ) { 785 + Ok(_) => {} 786 + Err(e) => { 787 + println!( 788 + "{} redis error: {}", 789 + format!("[{}]", spotify_email_clone).bright_green(), 790 + e.to_string().bright_red() 791 + ); 792 + } 793 + } 794 + thread::sleep(std::time::Duration::from_millis(800)); 795 + continue; 796 + } 797 + } 798 + continue; 799 + } 800 + 801 + if let Ok(Some(cached)) = cache_clone.get(&spotify_email_clone) { 802 + if cached == "No content" { 803 + thread::sleep(std::time::Duration::from_millis(800)); 804 + continue; 805 + } 806 + match cache_clone.setex(&format!("{}:current", spotify_email_clone), &cached, 16) { 807 + Ok(_) => {} 808 + Err(e) => { 809 + println!( 810 + "{} redis error: {}", 811 + format!("[{}]", spotify_email_clone).bright_green(), 812 + e.to_string().bright_red() 813 + ); 814 + } 815 + } 816 + } 817 + 818 + thread::sleep(std::time::Duration::from_millis(800)); 819 + } 820 + Ok::<(), Error>(()) 821 + }); 822 + 823 + loop { 824 + if stop_flag.load(std::sync::atomic::Ordering::Relaxed) { 825 + println!( 826 + "{} Stopping Thread", 827 + format!("[{}]", spotify_email).bright_green() 828 + ); 829 + break; 830 + } 831 + let spotify_email = spotify_email.clone(); 832 + let token = token.clone(); 833 + let did = did.clone(); 834 + let cache = cache.clone(); 835 + 836 + let currently_playing = get_currently_playing(cache.clone(), &spotify_email, &token).await; 837 + let currently_playing = match currently_playing { 838 + Ok(currently_playing) => currently_playing, 839 + Err(e) => { 840 + println!( 841 + "{} {}", 842 + 
format!("[{}]", spotify_email).bright_green(), 843 + e.to_string().bright_red() 844 + ); 845 + tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 846 + continue; 847 + } 848 + }; 849 + 850 + if let Some((data, changed)) = currently_playing { 851 + if data.item.is_none() { 852 + println!( 853 + "{} {}", 854 + format!("[{}]", spotify_email).bright_green(), 855 + "No song playing".yellow() 856 + ); 857 + tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 858 + continue; 859 + } 860 + let data_item = data.item.unwrap(); 861 + println!( 862 + "{} {} is_playing: {} changed: {}", 863 + format!("[{}]", spotify_email).bright_green(), 864 + format!("{} - {}", data_item.name, data_item.artists[0].name).yellow(), 865 + data.is_playing, 866 + changed 867 + ); 868 + 869 + if changed { 870 + scrobble(cache.clone(), &spotify_email, &did, &token).await?; 871 + 872 + thread::spawn(move || { 873 + let rt = tokio::runtime::Runtime::new().unwrap(); 874 + match rt.block_on(async { 875 + get_album_tracks(cache.clone(), &data_item.album.id, &token).await?; 876 + get_album(cache.clone(), &data_item.album.id, &token).await?; 877 + update_library(cache.clone(), &spotify_email, &did, &token).await?; 878 + Ok::<(), Error>(()) 879 + }) { 880 + Ok(_) => {} 881 + Err(e) => { 882 + println!( 883 + "{} {}", 884 + format!("[{}]", spotify_email).bright_green(), 885 + e.to_string().bright_red() 886 + ); 887 + } 888 + } 889 + }); 890 + } 891 + } 892 + 893 + tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 894 + } 895 + 896 + Ok(()) 897 + }
+3 -688
crates/spotify/src/main.rs
··· 7 7 8 8 use anyhow::Error; 9 9 use async_nats::connect; 10 - use cache::Cache; 11 - use crypto::decrypt_aes_256_ctr; 12 10 use dotenv::dotenv; 13 11 use owo_colors::OwoColorize; 14 - use reqwest::Client; 15 - use rocksky::{scrobble, update_library}; 16 - use sqlx::{postgres::PgPoolOptions, Pool, Postgres}; 12 + use rocksky_spotify::cache::Cache; 13 + use rocksky_spotify::{find_spotify_user, find_spotify_users, watch_currently_playing}; 14 + use sqlx::postgres::PgPoolOptions; 17 15 use tokio_stream::StreamExt; 18 - use types::{ 19 - album_tracks::AlbumTracks, 20 - currently_playing::{Album, Artist, CurrentlyPlaying}, 21 - spotify_token::SpotifyTokenWithEmail, 22 - token::AccessToken, 23 - }; 24 - 25 - pub mod cache; 26 - pub mod crypto; 27 - pub mod rocksky; 28 - pub mod token; 29 - pub mod types; 30 - 31 - const BASE_URL: &str = "https://spotify-api.rocksky.app/v1"; 32 16 33 17 #[tokio::main] 34 18 async fn main() -> Result<(), Box<dyn std::error::Error>> { ··· 226 210 227 211 Ok(()) 228 212 } 229 - 230 - pub async fn refresh_token(token: &str) -> Result<AccessToken, Error> { 231 - if env::var("SPOTIFY_CLIENT_ID").is_err() || env::var("SPOTIFY_CLIENT_SECRET").is_err() { 232 - panic!("Please set SPOTIFY_CLIENT_ID and SPOTIFY_CLIENT_SECRET environment variables"); 233 - } 234 - 235 - let client_id = env::var("SPOTIFY_CLIENT_ID")?; 236 - let client_secret = env::var("SPOTIFY_CLIENT_SECRET")?; 237 - 238 - let client = Client::new(); 239 - 240 - let response = client 241 - .post("https://accounts.spotify.com/api/token") 242 - .basic_auth(&client_id, Some(client_secret)) 243 - .form(&[ 244 - ("grant_type", "refresh_token"), 245 - ("refresh_token", token), 246 - ("client_id", &client_id), 247 - ]) 248 - .send() 249 - .await?; 250 - let token = response.json::<AccessToken>().await?; 251 - Ok(token) 252 - } 253 - 254 - pub async fn get_currently_playing( 255 - cache: Cache, 256 - user_id: &str, 257 - token: &str, 258 - ) -> Result<Option<(CurrentlyPlaying, bool)>, 
Error> { 259 - if let Ok(Some(data)) = cache.get(user_id) { 260 - println!( 261 - "{} {}", 262 - format!("[{}]", user_id).bright_green(), 263 - "Using cache".cyan() 264 - ); 265 - if data == "No content" { 266 - return Ok(None); 267 - } 268 - let decoded_data = serde_json::from_str::<CurrentlyPlaying>(&data); 269 - 270 - if decoded_data.is_err() { 271 - println!( 272 - "{} {} {}", 273 - format!("[{}]", user_id).bright_green(), 274 - "Cache is invalid".red(), 275 - data 276 - ); 277 - cache.setex(user_id, "No content", 10)?; 278 - cache.del(&format!("{}:current", user_id))?; 279 - return Ok(None); 280 - } 281 - 282 - let data: CurrentlyPlaying = decoded_data.unwrap(); 283 - // detect if the song has changed 284 - let previous = cache.get(&format!("{}:previous", user_id)); 285 - 286 - if previous.is_err() { 287 - println!( 288 - "{} redis error: {}", 289 - format!("[{}]", user_id).bright_green(), 290 - previous.unwrap_err().to_string().bright_red() 291 - ); 292 - return Ok(None); 293 - } 294 - 295 - let previous = previous.unwrap(); 296 - 297 - let changed = match previous { 298 - Some(previous) => { 299 - if serde_json::from_str::<CurrentlyPlaying>(&previous).is_err() { 300 - println!( 301 - "{} {} {}", 302 - format!("[{}]", user_id).bright_green(), 303 - "Previous cache is invalid", 304 - previous 305 - ); 306 - return Ok(None); 307 - } 308 - 309 - let previous: CurrentlyPlaying = serde_json::from_str(&previous)?; 310 - if previous.item.is_none() && data.item.is_some() { 311 - return Ok(Some((data, true))); 312 - } 313 - 314 - if previous.item.is_some() && data.item.is_none() { 315 - return Ok(Some((data, false))); 316 - } 317 - 318 - if previous.item.is_none() && data.item.is_none() { 319 - return Ok(Some((data, false))); 320 - } 321 - 322 - let previous_item = previous.item.unwrap(); 323 - let data_item = data.clone().item.unwrap(); 324 - previous_item.id != data_item.id 325 - && previous.progress_ms.unwrap_or(0) != data.progress_ms.unwrap_or(0) 326 - } 327 - _ 
=> true, 328 - }; 329 - return Ok(Some((data, changed))); 330 - } 331 - 332 - let token = refresh_token(token).await?; 333 - let client = Client::new(); 334 - let response = client 335 - .get(format!("{}/me/player/currently-playing", BASE_URL)) 336 - .bearer_auth(token.access_token) 337 - .send() 338 - .await?; 339 - 340 - let headers = response.headers().clone(); 341 - let status = response.status().as_u16(); 342 - let data = response.text().await?; 343 - 344 - if status == 429 { 345 - println!( 346 - "{} Too many requests, retry-after {}", 347 - format!("[{}]", user_id).bright_green(), 348 - headers 349 - .get("retry-after") 350 - .unwrap() 351 - .to_str() 352 - .unwrap() 353 - .bright_green() 354 - ); 355 - return Ok(None); 356 - } 357 - 358 - let previous = cache.get(&format!("{}:previous", user_id)); 359 - if previous.is_err() { 360 - println!( 361 - "{} redis error: {}", 362 - format!("[{}]", user_id).bright_green(), 363 - previous.unwrap_err().to_string().bright_red() 364 - ); 365 - return Ok(None); 366 - } 367 - 368 - let previous = previous.unwrap(); 369 - 370 - // check if status code is 204 371 - if status == 204 { 372 - println!("No content"); 373 - match cache.setex( 374 - user_id, 375 - "No content", 376 - match previous.is_none() { 377 - true => 30, 378 - false => 10, 379 - }, 380 - ) { 381 - Ok(_) => {} 382 - Err(e) => { 383 - println!( 384 - "{} redis error: {}", 385 - format!("[{}]", user_id).bright_green(), 386 - e.to_string().bright_red() 387 - ); 388 - return Ok(None); 389 - } 390 - } 391 - match cache.del(&format!("{}:current", user_id)) { 392 - Ok(_) => {} 393 - Err(e) => { 394 - println!( 395 - "{} redis error: {}", 396 - format!("[{}]", user_id).bright_green(), 397 - e.to_string().bright_red() 398 - ); 399 - return Ok(None); 400 - } 401 - } 402 - return Ok(None); 403 - } 404 - 405 - if serde_json::from_str::<CurrentlyPlaying>(&data).is_err() { 406 - println!( 407 - "{} {} {}", 408 - format!("[{}]", user_id).bright_green(), 409 - "Invalid 
data received".red(), 410 - data 411 - ); 412 - match cache.setex(user_id, "No content", 10) { 413 - Ok(_) => {} 414 - Err(e) => { 415 - println!( 416 - "{} redis error: {}", 417 - format!("[{}]", user_id).bright_green(), 418 - e.to_string().bright_red() 419 - ); 420 - return Ok(None); 421 - } 422 - } 423 - match cache.del(&format!("{}:current", user_id)) { 424 - Ok(_) => {} 425 - Err(e) => { 426 - println!( 427 - "{} redis error: {}", 428 - format!("[{}]", user_id).bright_green(), 429 - e.to_string().bright_red() 430 - ); 431 - return Ok(None); 432 - } 433 - } 434 - return Ok(None); 435 - } 436 - 437 - let data = serde_json::from_str::<CurrentlyPlaying>(&data)?; 438 - 439 - match cache.setex( 440 - user_id, 441 - &serde_json::to_string(&data)?, 442 - match previous.is_none() { 443 - true => 30, 444 - false => 15, 445 - }, 446 - ) { 447 - Ok(_) => {} 448 - Err(e) => { 449 - println!( 450 - "{} redis error: {}", 451 - format!("[{}]", user_id).bright_green(), 452 - e.to_string().bright_red() 453 - ); 454 - return Ok(None); 455 - } 456 - } 457 - match cache.del(&format!("{}:current", user_id)) { 458 - Ok(_) => {} 459 - Err(e) => { 460 - println!( 461 - "{} redis error: {}", 462 - format!("[{}]", user_id).bright_green(), 463 - e.to_string().bright_red() 464 - ); 465 - return Ok(None); 466 - } 467 - } 468 - 469 - // detect if the song has changed 470 - let previous = cache.get(&format!("{}:previous", user_id)); 471 - 472 - if previous.is_err() { 473 - println!( 474 - "{} redis error: {}", 475 - format!("[{}]", user_id).bright_green(), 476 - previous.unwrap_err().to_string().bright_red() 477 - ); 478 - return Ok(None); 479 - } 480 - 481 - let previous = previous.unwrap(); 482 - let changed = match previous { 483 - Some(previous) => { 484 - if serde_json::from_str::<CurrentlyPlaying>(&previous).is_err() { 485 - println!( 486 - "{} {} {}", 487 - format!("[{}]", user_id).bright_green(), 488 - "Previous cache is invalid", 489 - previous 490 - ); 491 - return Ok(None); 492 - 
} 493 - 494 - let previous: CurrentlyPlaying = serde_json::from_str(&previous)?; 495 - if previous.item.is_none() || data.item.is_none() { 496 - return Ok(Some((data, false))); 497 - } 498 - 499 - let previous_item = previous.item.unwrap(); 500 - let data_item = data.clone().item.unwrap(); 501 - 502 - previous_item.id != data_item.id 503 - && previous.progress_ms.unwrap_or(0) != data.progress_ms.unwrap_or(0) 504 - } 505 - _ => false, 506 - }; 507 - 508 - // save as previous song 509 - match cache.setex( 510 - &format!("{}:previous", user_id), 511 - &serde_json::to_string(&data)?, 512 - 600, 513 - ) { 514 - Ok(_) => {} 515 - Err(e) => { 516 - println!( 517 - "{} redis error: {}", 518 - format!("[{}]", user_id).bright_green(), 519 - e.to_string().bright_red() 520 - ); 521 - return Ok(None); 522 - } 523 - } 524 - 525 - Ok(Some((data, changed))) 526 - } 527 - 528 - pub async fn get_artist( 529 - cache: Cache, 530 - artist_id: &str, 531 - token: &str, 532 - ) -> Result<Option<Artist>, Error> { 533 - if let Ok(Some(data)) = cache.get(artist_id) { 534 - return Ok(Some(serde_json::from_str(&data)?)); 535 - } 536 - 537 - let token = refresh_token(token).await?; 538 - let client = Client::new(); 539 - let response = client 540 - .get(&format!("{}/artists/{}", BASE_URL, artist_id)) 541 - .bearer_auth(token.access_token) 542 - .send() 543 - .await?; 544 - 545 - let headers = response.headers().clone(); 546 - let data = response.text().await?; 547 - 548 - if data == "Too many requests" { 549 - println!( 550 - "> retry-after {}", 551 - headers.get("retry-after").unwrap().to_str().unwrap() 552 - ); 553 - println!("> {} [get_artist]", data); 554 - return Ok(None); 555 - } 556 - 557 - match cache.setex(artist_id, &data, 20) { 558 - Ok(_) => {} 559 - Err(e) => { 560 - println!( 561 - "{} redis error: {}", 562 - format!("[{}]", artist_id).bright_green(), 563 - e.to_string().bright_red() 564 - ); 565 - return Ok(None); 566 - } 567 - } 568 - 569 - Ok(Some(serde_json::from_str(&data)?)) 
570 - } 571 - 572 - pub async fn get_album(cache: Cache, album_id: &str, token: &str) -> Result<Option<Album>, Error> { 573 - if let Ok(Some(data)) = cache.get(album_id) { 574 - return Ok(Some(serde_json::from_str(&data)?)); 575 - } 576 - 577 - let token = refresh_token(token).await?; 578 - let client = Client::new(); 579 - let response = client 580 - .get(&format!("{}/albums/{}", BASE_URL, album_id)) 581 - .bearer_auth(token.access_token) 582 - .send() 583 - .await?; 584 - 585 - let headers = response.headers().clone(); 586 - let data = response.text().await?; 587 - 588 - if data == "Too many requests" { 589 - println!( 590 - "> retry-after {}", 591 - headers.get("retry-after").unwrap().to_str().unwrap() 592 - ); 593 - println!("> {} [get_album]", data); 594 - return Ok(None); 595 - } 596 - 597 - match cache.setex(album_id, &data, 20) { 598 - Ok(_) => {} 599 - Err(e) => { 600 - println!( 601 - "{} redis error: {}", 602 - format!("[{}]", album_id).bright_green(), 603 - e.to_string().bright_red() 604 - ); 605 - return Ok(None); 606 - } 607 - } 608 - 609 - Ok(Some(serde_json::from_str(&data)?)) 610 - } 611 - 612 - pub async fn get_album_tracks( 613 - cache: Cache, 614 - album_id: &str, 615 - token: &str, 616 - ) -> Result<AlbumTracks, Error> { 617 - if let Ok(Some(data)) = cache.get(&format!("{}:tracks", album_id)) { 618 - return Ok(serde_json::from_str(&data)?); 619 - } 620 - 621 - let token = refresh_token(token).await?; 622 - let client = Client::new(); 623 - let mut all_tracks = Vec::new(); 624 - let mut offset = 0; 625 - let limit = 50; 626 - 627 - loop { 628 - let response = client 629 - .get(&format!("{}/albums/{}/tracks", BASE_URL, album_id)) 630 - .bearer_auth(&token.access_token) 631 - .query(&[ 632 - ("limit", &limit.to_string()), 633 - ("offset", &offset.to_string()), 634 - ]) 635 - .send() 636 - .await?; 637 - 638 - let headers = response.headers().clone(); 639 - let data = response.text().await?; 640 - if data == "Too many requests" { 641 - println!( 
642 - "> retry-after {}", 643 - headers.get("retry-after").unwrap().to_str().unwrap() 644 - ); 645 - println!("> {} [get_album_tracks]", data); 646 - continue; 647 - } 648 - 649 - let album_tracks: AlbumTracks = serde_json::from_str(&data)?; 650 - 651 - if album_tracks.items.is_empty() { 652 - break; 653 - } 654 - 655 - all_tracks.extend(album_tracks.items); 656 - offset += limit; 657 - } 658 - 659 - let all_tracks_json = serde_json::to_string(&all_tracks)?; 660 - match cache.setex(&format!("{}:tracks", album_id), &all_tracks_json, 20) { 661 - Ok(_) => {} 662 - Err(e) => { 663 - println!( 664 - "{} redis error: {}", 665 - format!("[{}]", album_id).bright_green(), 666 - e.to_string().bright_red() 667 - ); 668 - } 669 - } 670 - 671 - Ok(AlbumTracks { 672 - items: all_tracks, 673 - ..Default::default() 674 - }) 675 - } 676 - 677 - pub async fn find_spotify_users( 678 - pool: &Pool<Postgres>, 679 - offset: usize, 680 - limit: usize, 681 - ) -> Result<Vec<(String, String, String, String)>, Error> { 682 - let results: Vec<SpotifyTokenWithEmail> = sqlx::query_as( 683 - r#" 684 - SELECT * FROM spotify_tokens 685 - LEFT JOIN spotify_accounts ON spotify_tokens.user_id = spotify_accounts.user_id 686 - LEFT JOIN users ON spotify_accounts.user_id = users.xata_id 687 - LIMIT $1 OFFSET $2 688 - "#, 689 - ) 690 - .bind(limit as i64) 691 - .bind(offset as i64) 692 - .fetch_all(pool) 693 - .await?; 694 - 695 - let mut user_tokens = vec![]; 696 - 697 - for result in &results { 698 - let token = decrypt_aes_256_ctr( 699 - &result.refresh_token, 700 - &hex::decode(env::var("SPOTIFY_ENCRYPTION_KEY")?)?, 701 - )?; 702 - user_tokens.push(( 703 - result.email.clone(), 704 - token, 705 - result.did.clone(), 706 - result.user_id.clone(), 707 - )); 708 - } 709 - 710 - Ok(user_tokens) 711 - } 712 - 713 - pub async fn find_spotify_user( 714 - pool: &Pool<Postgres>, 715 - email: &str, 716 - ) -> Result<Option<(String, String, String)>, Error> { 717 - let result: Vec<SpotifyTokenWithEmail> = 
sqlx::query_as( 718 - r#" 719 - SELECT * FROM spotify_tokens 720 - LEFT JOIN spotify_accounts ON spotify_tokens.user_id = spotify_accounts.user_id 721 - LEFT JOIN users ON spotify_accounts.user_id = users.xata_id 722 - WHERE spotify_accounts.email = $1 723 - "#, 724 - ) 725 - .bind(email) 726 - .fetch_all(pool) 727 - .await?; 728 - 729 - match result.first() { 730 - Some(result) => { 731 - let token = decrypt_aes_256_ctr( 732 - &result.refresh_token, 733 - &hex::decode(env::var("SPOTIFY_ENCRYPTION_KEY")?)?, 734 - )?; 735 - Ok(Some((result.email.clone(), token, result.did.clone()))) 736 - } 737 - None => Ok(None), 738 - } 739 - } 740 - 741 - pub async fn watch_currently_playing( 742 - spotify_email: String, 743 - token: String, 744 - did: String, 745 - stop_flag: Arc<AtomicBool>, 746 - cache: Cache, 747 - ) -> Result<(), Error> { 748 - println!( 749 - "{} {}", 750 - format!("[{}]", spotify_email).bright_green(), 751 - "Checking currently playing".cyan() 752 - ); 753 - 754 - let stop_flag_clone = stop_flag.clone(); 755 - let spotify_email_clone = spotify_email.clone(); 756 - let cache_clone = cache.clone(); 757 - thread::spawn(move || { 758 - loop { 759 - if stop_flag_clone.load(std::sync::atomic::Ordering::Relaxed) { 760 - println!( 761 - "{} Stopping Thread", 762 - format!("[{}]", spotify_email_clone).bright_green() 763 - ); 764 - break; 765 - } 766 - if let Ok(Some(cached)) = cache_clone.get(&format!("{}:current", spotify_email_clone)) { 767 - if serde_json::from_str::<CurrentlyPlaying>(&cached).is_err() { 768 - thread::sleep(std::time::Duration::from_millis(800)); 769 - continue; 770 - } 771 - 772 - let mut current_song = serde_json::from_str::<CurrentlyPlaying>(&cached)?; 773 - 774 - if let Some(item) = current_song.item.clone() { 775 - if current_song.is_playing 776 - && current_song.progress_ms.unwrap_or(0) < item.duration_ms.into() 777 - { 778 - current_song.progress_ms = 779 - Some(current_song.progress_ms.unwrap_or(0) + 800); 780 - match cache_clone.setex( 
781 - &format!("{}:current", spotify_email_clone), 782 - &serde_json::to_string(&current_song)?, 783 - 16, 784 - ) { 785 - Ok(_) => {} 786 - Err(e) => { 787 - println!( 788 - "{} redis error: {}", 789 - format!("[{}]", spotify_email_clone).bright_green(), 790 - e.to_string().bright_red() 791 - ); 792 - } 793 - } 794 - thread::sleep(std::time::Duration::from_millis(800)); 795 - continue; 796 - } 797 - } 798 - continue; 799 - } 800 - 801 - if let Ok(Some(cached)) = cache_clone.get(&spotify_email_clone) { 802 - if cached == "No content" { 803 - thread::sleep(std::time::Duration::from_millis(800)); 804 - continue; 805 - } 806 - match cache_clone.setex(&format!("{}:current", spotify_email_clone), &cached, 16) { 807 - Ok(_) => {} 808 - Err(e) => { 809 - println!( 810 - "{} redis error: {}", 811 - format!("[{}]", spotify_email_clone).bright_green(), 812 - e.to_string().bright_red() 813 - ); 814 - } 815 - } 816 - } 817 - 818 - thread::sleep(std::time::Duration::from_millis(800)); 819 - } 820 - Ok::<(), Error>(()) 821 - }); 822 - 823 - loop { 824 - if stop_flag.load(std::sync::atomic::Ordering::Relaxed) { 825 - println!( 826 - "{} Stopping Thread", 827 - format!("[{}]", spotify_email).bright_green() 828 - ); 829 - break; 830 - } 831 - let spotify_email = spotify_email.clone(); 832 - let token = token.clone(); 833 - let did = did.clone(); 834 - let cache = cache.clone(); 835 - 836 - let currently_playing = get_currently_playing(cache.clone(), &spotify_email, &token).await; 837 - let currently_playing = match currently_playing { 838 - Ok(currently_playing) => currently_playing, 839 - Err(e) => { 840 - println!( 841 - "{} {}", 842 - format!("[{}]", spotify_email).bright_green(), 843 - e.to_string().bright_red() 844 - ); 845 - tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 846 - continue; 847 - } 848 - }; 849 - 850 - if let Some((data, changed)) = currently_playing { 851 - if data.item.is_none() { 852 - println!( 853 - "{} {}", 854 - format!("[{}]", 
spotify_email).bright_green(), 855 - "No song playing".yellow() 856 - ); 857 - tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 858 - continue; 859 - } 860 - let data_item = data.item.unwrap(); 861 - println!( 862 - "{} {} is_playing: {} changed: {}", 863 - format!("[{}]", spotify_email).bright_green(), 864 - format!("{} - {}", data_item.name, data_item.artists[0].name).yellow(), 865 - data.is_playing, 866 - changed 867 - ); 868 - 869 - if changed { 870 - scrobble(cache.clone(), &spotify_email, &did, &token).await?; 871 - 872 - thread::spawn(move || { 873 - let rt = tokio::runtime::Runtime::new().unwrap(); 874 - match rt.block_on(async { 875 - get_album_tracks(cache.clone(), &data_item.album.id, &token).await?; 876 - get_album(cache.clone(), &data_item.album.id, &token).await?; 877 - update_library(cache.clone(), &spotify_email, &did, &token).await?; 878 - Ok::<(), Error>(()) 879 - }) { 880 - Ok(_) => {} 881 - Err(e) => { 882 - println!( 883 - "{} {}", 884 - format!("[{}]", spotify_email).bright_green(), 885 - e.to_string().bright_red() 886 - ); 887 - } 888 - } 889 - }); 890 - } 891 - } 892 - 893 - tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 894 - } 895 - 896 - Ok(()) 897 - }
+1 -1
crates/storage/Cargo.toml
··· 1 1 [package] 2 - name = "storage" 2 + name = "rocksky-storage" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+1
crates/storage/src/lib.rs
··· 1 +
+24
crates/tracklist/Cargo.toml
··· 1 + [package] 2 + name = "rocksky-tracklist" 3 + version = "0.1.0" 4 + authors.workspace = true 5 + edition.workspace = true 6 + license.workspace = true 7 + repository.workspace = true 8 + 9 + 10 + [dependencies] 11 + redis = { version = "0.29.0", features = ["tokio-rustls-comp"] } 12 + owo-colors = "4.1.0" 13 + anyhow = "1.0.96" 14 + async-nats = "0.39.0" 15 + dotenv = "0.15.0" 16 + serde = { version = "1.0.217", features = ["derive"] } 17 + serde_json = "1.0.139" 18 + tokio = { version = "1.43.0", features = ["full"] } 19 + tokio-stream = { version = "0.1.17", features = ["full"] } 20 + actix-web = "4.9.0" 21 + polars = "0.46.0" 22 + clap = "4.5.31" 23 + rand = "0.9.2" 24 + uuid = { version = "1.18.0", features = ["v4"] }
+47
crates/tracklist/src/handlers/mod.rs
··· 1 + use crate::handlers::tracklist::*; 2 + use actix_web::{web, HttpRequest, HttpResponse}; 3 + use anyhow::Error; 4 + use std::sync::Arc; 5 + 6 + pub mod tracklist; 7 + 8 + #[macro_export] 9 + macro_rules! read_payload { 10 + ($payload:expr) => {{ 11 + let mut body = Vec::new(); 12 + while let Some(chunk) = $payload.next().await { 13 + // skip if None 14 + match chunk { 15 + Ok(bytes) => body.extend_from_slice(&bytes), 16 + Err(err) => return Err(err.into()), 17 + } 18 + } 19 + body 20 + }}; 21 + } 22 + 23 + pub async fn handle( 24 + method: &str, 25 + payload: &mut web::Payload, 26 + req: &HttpRequest, 27 + conn: Arc<redis::Client>, 28 + ) -> Result<HttpResponse, Error> { 29 + match method { 30 + "tracklist.addTrack" => add_track(payload, req, conn.clone()).await, 31 + "tracklist.insertTrackAt" => insert_track_at(payload, req, conn.clone()).await, 32 + "tracklist.removeTrackAt" => remove_track_at(payload, req, conn.clone()).await, 33 + "tracklist.shuffleQueue" => shuffle_queue(payload, req, conn.clone()).await, 34 + "tracklist.getQueue" => get_queue(payload, req, conn.clone()).await, 35 + "tracklist.clearQueue" => clear_queue(payload, req, conn.clone()).await, 36 + "tracklist.getQueueLength" => get_queue_length(payload, req, conn.clone()).await, 37 + "tracklist.isQueueEmpty" => is_queue_empty(payload, req, conn.clone()).await, 38 + "tracklist.setCurrentTrack" => set_current_track(payload, req, conn.clone()).await, 39 + "tracklist.getCurrentTrack" => get_current_track(payload, req, conn.clone()).await, 40 + "tracklist.clearCurrentTrack" => clear_current_track(payload, req, conn.clone()).await, 41 + "tracklist.moveTrack" => move_track(payload, req, conn.clone()).await, 42 + "tracklist.replaceQueue" => replace_queue(payload, req, conn.clone()).await, 43 + "tracklist.getTrackAt" => get_track_at(payload, req, conn.clone()).await, 44 + "tracklist.insertTracksAt" => insert_tracks_at(payload, req, conn.clone()).await, 45 + _ => return Err(anyhow::anyhow!("Method not 
found")), 46 + } 47 + }
+205
crates/tracklist/src/handlers/tracklist.rs
··· 1 + use std::sync::Arc; 2 + 3 + use actix_web::{web, HttpRequest, HttpResponse}; 4 + use anyhow::Error; 5 + use serde_json::json; 6 + use tokio_stream::StreamExt; 7 + 8 + use crate::{queue, read_payload, types::*}; 9 + 10 + pub async fn add_track( 11 + payload: &mut web::Payload, 12 + _req: &HttpRequest, 13 + client: Arc<redis::Client>, 14 + ) -> Result<HttpResponse, Error> { 15 + let body = read_payload!(payload); 16 + let params = serde_json::from_slice::<AddTrackParams>(&body)?; 17 + 18 + let new_queue = queue::add_track(&client, &params.did, &params.track_id).await?; 19 + 20 + Ok(HttpResponse::Ok().json(web::Json(json!(new_queue)))) 21 + } 22 + 23 + pub async fn insert_track_at( 24 + payload: &mut web::Payload, 25 + _req: &HttpRequest, 26 + client: Arc<redis::Client>, 27 + ) -> Result<HttpResponse, Error> { 28 + let body = read_payload!(payload); 29 + let params = serde_json::from_slice::<InsertTrackAtParams>(&body)?; 30 + 31 + let new_queue = 32 + queue::insert_track_at(&client, &params.did, params.index, &params.track_id).await?; 33 + 34 + Ok(HttpResponse::Ok().json(web::Json(json!(new_queue)))) 35 + } 36 + 37 + pub async fn remove_track_at( 38 + payload: &mut web::Payload, 39 + _req: &HttpRequest, 40 + client: Arc<redis::Client>, 41 + ) -> Result<HttpResponse, Error> { 42 + let body = read_payload!(payload); 43 + let params = serde_json::from_slice::<RemoveTrackAtParams>(&body)?; 44 + 45 + let new_queue = queue::remove_track_at(&client, &params.did, params.index).await?; 46 + 47 + Ok(HttpResponse::Ok().json(web::Json(json!(new_queue)))) 48 + } 49 + 50 + pub async fn shuffle_queue( 51 + payload: &mut web::Payload, 52 + _req: &HttpRequest, 53 + client: Arc<redis::Client>, 54 + ) -> Result<HttpResponse, Error> { 55 + let body = read_payload!(payload); 56 + let params = serde_json::from_slice::<ShuffleQueueParams>(&body)?; 57 + 58 + let shuffled_queue = queue::shuffle_queue(&client, &params.did).await?; 59 + 60 + 
Ok(HttpResponse::Ok().json(web::Json(json!(shuffled_queue)))) 61 + } 62 + 63 + pub async fn get_queue( 64 + payload: &mut web::Payload, 65 + _req: &HttpRequest, 66 + client: Arc<redis::Client>, 67 + ) -> Result<HttpResponse, Error> { 68 + let body = read_payload!(payload); 69 + let params = serde_json::from_slice::<GetQueueParams>(&body)?; 70 + 71 + let tracks = queue::get_queue(&client, &params.did).await?; 72 + 73 + Ok(HttpResponse::Ok().json(web::Json(tracks))) 74 + } 75 + 76 + pub async fn clear_queue( 77 + payload: &mut web::Payload, 78 + _req: &HttpRequest, 79 + client: Arc<redis::Client>, 80 + ) -> Result<HttpResponse, Error> { 81 + let body = read_payload!(payload); 82 + let params = serde_json::from_slice::<ClearQueueParams>(&body)?; 83 + 84 + queue::clear_queue(&client, &params.did).await?; 85 + 86 + Ok(HttpResponse::Ok().json(web::Json(json!({})))) 87 + } 88 + 89 + pub async fn get_queue_length( 90 + payload: &mut web::Payload, 91 + _req: &HttpRequest, 92 + client: Arc<redis::Client>, 93 + ) -> Result<HttpResponse, Error> { 94 + let body = read_payload!(payload); 95 + let params = serde_json::from_slice::<GetQueueLengthParams>(&body)?; 96 + 97 + let length = queue::get_queue_length(&client, &params.did).await?; 98 + 99 + Ok(HttpResponse::Ok().json(web::Json(json!({ "length": length })))) 100 + } 101 + 102 + pub async fn is_queue_empty( 103 + payload: &mut web::Payload, 104 + _req: &HttpRequest, 105 + client: Arc<redis::Client>, 106 + ) -> Result<HttpResponse, Error> { 107 + let body = read_payload!(payload); 108 + let params = serde_json::from_slice::<IsQueueEmptyParams>(&body)?; 109 + 110 + let is_empty = queue::is_queue_empty(&client, &params.did).await?; 111 + 112 + Ok(HttpResponse::Ok().json(web::Json(json!({ "is_empty": is_empty })))) 113 + } 114 + 115 + pub async fn set_current_track( 116 + payload: &mut web::Payload, 117 + _req: &HttpRequest, 118 + client: Arc<redis::Client>, 119 + ) -> Result<HttpResponse, Error> { 120 + let body = 
read_payload!(payload); 121 + let params = serde_json::from_slice::<SetCurrentTrackParams>(&body)?; 122 + 123 + queue::set_current_track(&client, &params.did, params.index).await?; 124 + 125 + Ok(HttpResponse::Ok().json(web::Json(json!({})))) 126 + } 127 + 128 + pub async fn get_current_track( 129 + payload: &mut web::Payload, 130 + _req: &HttpRequest, 131 + client: Arc<redis::Client>, 132 + ) -> Result<HttpResponse, Error> { 133 + let body = read_payload!(payload); 134 + let params = serde_json::from_slice::<GetCurrentTrackParams>(&body)?; 135 + 136 + let current_track = queue::get_current_track(&client, &params.did).await?; 137 + 138 + Ok(HttpResponse::Ok().json(web::Json(json!({ "current_track": current_track })))) 139 + } 140 + 141 + pub async fn clear_current_track( 142 + payload: &mut web::Payload, 143 + _req: &HttpRequest, 144 + client: Arc<redis::Client>, 145 + ) -> Result<HttpResponse, Error> { 146 + let body = read_payload!(payload); 147 + let params = serde_json::from_slice::<ClearCurrentTrackParams>(&body)?; 148 + 149 + queue::clear_current_track(&client, &params.did).await?; 150 + 151 + Ok(HttpResponse::Ok().json(web::Json(json!({})))) 152 + } 153 + 154 + pub async fn move_track( 155 + payload: &mut web::Payload, 156 + _req: &HttpRequest, 157 + client: Arc<redis::Client>, 158 + ) -> Result<HttpResponse, Error> { 159 + let body = read_payload!(payload); 160 + let params = serde_json::from_slice::<MoveTrackParams>(&body)?; 161 + 162 + let new_queue = queue::move_track(&client, &params.did, params.from, params.to).await?; 163 + 164 + Ok(HttpResponse::Ok().json(web::Json(json!(new_queue)))) 165 + } 166 + 167 + pub async fn replace_queue( 168 + payload: &mut web::Payload, 169 + _req: &HttpRequest, 170 + client: Arc<redis::Client>, 171 + ) -> Result<HttpResponse, Error> { 172 + let body = read_payload!(payload); 173 + let params = serde_json::from_slice::<ReplaceQueueParams>(&body)?; 174 + 175 + let new_queue = queue::replace_queue(&client, &params.did, 
params.track_ids).await?; 176 + 177 + Ok(HttpResponse::Ok().json(web::Json(json!(new_queue)))) 178 + } 179 + 180 + pub async fn get_track_at( 181 + payload: &mut web::Payload, 182 + _req: &HttpRequest, 183 + client: Arc<redis::Client>, 184 + ) -> Result<HttpResponse, Error> { 185 + let body = read_payload!(payload); 186 + let params = serde_json::from_slice::<GetTrackAtParams>(&body)?; 187 + 188 + let track_id = queue::get_track_at(&client, &params.did, params.index).await?; 189 + 190 + Ok(HttpResponse::Ok().json(web::Json(json!({ "track_id": track_id })))) 191 + } 192 + 193 + pub async fn insert_tracks_at( 194 + payload: &mut web::Payload, 195 + _req: &HttpRequest, 196 + client: Arc<redis::Client>, 197 + ) -> Result<HttpResponse, Error> { 198 + let body = read_payload!(payload); 199 + let params = serde_json::from_slice::<InsertTracksAtParams>(&body)?; 200 + 201 + let new_queue = 202 + queue::insert_tracks_at(&client, &params.did, params.index, params.track_ids).await?; 203 + 204 + Ok(HttpResponse::Ok().json(web::Json(json!(new_queue)))) 205 + }
+11
crates/tracklist/src/lib.rs
··· 1 + pub mod handlers; 2 + pub mod queue; 3 + pub mod server; 4 + pub mod types; 5 + 6 + use anyhow::Error; 7 + 8 + pub async fn run() -> Result<(), Error> { 9 + server::run().await?; 10 + Ok(()) 11 + }
+609
crates/tracklist/src/queue.rs
··· 1 + use anyhow::Error; 2 + use rand::seq::SliceRandom; 3 + use redis::AsyncCommands; 4 + 5 + pub async fn add_track( 6 + client: &redis::Client, 7 + did: &str, 8 + track_id: &str, 9 + ) -> Result<Vec<String>, Error> { 10 + let mut conn = client.get_multiplexed_async_connection().await?; 11 + 12 + conn.rpush::<_, _, i32>(format!("user:{}:queue", did), track_id) 13 + .await?; 14 + 15 + let queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 16 + 17 + Ok(queue) 18 + } 19 + 20 + pub async fn insert_track_at( 21 + client: &redis::Client, 22 + did: &str, 23 + position: usize, 24 + track_id: &str, 25 + ) -> Result<Vec<String>, Error> { 26 + let mut conn = client.get_multiplexed_async_connection().await?; 27 + 28 + let queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 29 + 30 + let mut new_queue = queue.clone(); 31 + if position >= new_queue.len() { 32 + new_queue.push(track_id.to_string()); 33 + } else { 34 + new_queue.insert(position, track_id.to_string()); 35 + } 36 + 37 + let mut pipeline = redis::pipe(); 38 + pipeline 39 + .atomic() 40 + .del(format!("user:{}:queue", did)) 41 + .rpush(format!("user:{}:queue", did), new_queue.clone()) 42 + .query_async::<()>(&mut conn) 43 + .await?; 44 + 45 + Ok(new_queue) 46 + } 47 + 48 + pub async fn remove_track_at( 49 + client: &redis::Client, 50 + did: &str, 51 + position: usize, 52 + ) -> Result<Vec<String>, Error> { 53 + let mut conn = client.get_multiplexed_async_connection().await?; 54 + 55 + let queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 56 + if position < queue.len() { 57 + let _: i32 = conn 58 + .lrem::<_, _, i32>(format!("user:{}:queue", did), 1, &queue[position]) 59 + .await?; 60 + } 61 + 62 + let new_queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 63 + Ok(new_queue) 64 + } 65 + 66 + pub async fn shuffle_queue(client: &redis::Client, did: &str) -> Result<Vec<String>, Error> { 67 + let mut conn = 
client.get_multiplexed_async_connection().await?; 68 + 69 + let mut queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 70 + let old_queue = queue.clone(); 71 + 72 + loop { 73 + let mut rng = rand::rng(); 74 + queue.shuffle(&mut rng); 75 + if queue != old_queue { 76 + break; 77 + } 78 + } 79 + 80 + redis::pipe() 81 + .atomic() 82 + .del(format!("user:{}:queue", did)) 83 + .rpush(format!("user:{}:queue", did), queue.clone()) 84 + .query_async::<()>(&mut conn) 85 + .await?; 86 + 87 + Ok(queue) 88 + } 89 + 90 + pub async fn get_queue(client: &redis::Client, did: &str) -> Result<Vec<String>, Error> { 91 + let mut conn = client.get_multiplexed_async_connection().await?; 92 + 93 + let queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 94 + Ok(queue) 95 + } 96 + 97 + pub async fn clear_queue(client: &redis::Client, did: &str) -> Result<(), Error> { 98 + let mut conn = client.get_multiplexed_async_connection().await?; 99 + 100 + redis::pipe() 101 + .atomic() 102 + .del(format!("user:{}:queue", did)) 103 + .query_async::<()>(&mut conn) 104 + .await?; 105 + 106 + Ok(()) 107 + } 108 + 109 + pub async fn get_queue_length(client: &redis::Client, did: &str) -> Result<usize, Error> { 110 + let mut conn = client.get_multiplexed_async_connection().await?; 111 + 112 + let length: usize = conn.llen(format!("user:{}:queue", did)).await?; 113 + Ok(length) 114 + } 115 + 116 + pub async fn is_queue_empty(client: &redis::Client, did: &str) -> Result<bool, Error> { 117 + let length = get_queue_length(client, did).await?; 118 + Ok(length == 0) 119 + } 120 + 121 + pub async fn set_current_track( 122 + client: &redis::Client, 123 + did: &str, 124 + position: usize, 125 + ) -> Result<(), Error> { 126 + let mut conn = client.get_multiplexed_async_connection().await?; 127 + 128 + conn.set::<_, _, ()>(format!("user:{}:current_track", did), position) 129 + .await?; 130 + 131 + Ok(()) 132 + } 133 + 134 + pub async fn get_current_track(client: 
&redis::Client, did: &str) -> Result<Option<usize>, Error> { 135 + let mut conn = client.get_multiplexed_async_connection().await?; 136 + 137 + let position: Option<usize> = conn 138 + .get::<_, Option<usize>>(format!("user:{}:current_track", did)) 139 + .await?; 140 + 141 + Ok(position) 142 + } 143 + 144 + pub async fn clear_current_track(client: &redis::Client, did: &str) -> Result<(), Error> { 145 + let mut conn = client.get_multiplexed_async_connection().await?; 146 + 147 + conn.del::<_, ()>(format!("user:{}:current_track", did)) 148 + .await?; 149 + 150 + Ok(()) 151 + } 152 + 153 + pub async fn move_track( 154 + client: &redis::Client, 155 + did: &str, 156 + from: usize, 157 + to: usize, 158 + ) -> Result<Vec<String>, Error> { 159 + let mut conn = client.get_multiplexed_async_connection().await?; 160 + 161 + let queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 162 + if from >= queue.len() || to >= queue.len() { 163 + return Ok(queue); 164 + } 165 + 166 + let mut new_queue = queue.clone(); 167 + let track = new_queue.remove(from); 168 + new_queue.insert(to, track); 169 + 170 + redis::pipe() 171 + .atomic() 172 + .del(format!("user:{}:queue", did)) 173 + .rpush(format!("user:{}:queue", did), new_queue.clone()) 174 + .query_async::<()>(&mut conn) 175 + .await?; 176 + 177 + Ok(new_queue) 178 + } 179 + 180 + pub async fn replace_queue( 181 + client: &redis::Client, 182 + did: &str, 183 + new_queue: Vec<String>, 184 + ) -> Result<Vec<String>, Error> { 185 + let mut conn = client.get_multiplexed_async_connection().await?; 186 + 187 + redis::pipe() 188 + .atomic() 189 + .del(format!("user:{}:queue", did)) 190 + .rpush(format!("user:{}:queue", did), new_queue.clone()) 191 + .query_async::<()>(&mut conn) 192 + .await?; 193 + 194 + Ok(new_queue) 195 + } 196 + 197 + pub async fn get_track_at( 198 + client: &redis::Client, 199 + did: &str, 200 + position: usize, 201 + ) -> Result<Option<String>, Error> { 202 + let mut conn = 
client.get_multiplexed_async_connection().await?; 203 + 204 + let track: Option<String> = conn 205 + .lindex::<_, Option<String>>(format!("user:{}:queue", did), position as isize) 206 + .await?; 207 + 208 + Ok(track) 209 + } 210 + 211 + pub async fn insert_tracks_at( 212 + client: &redis::Client, 213 + did: &str, 214 + position: usize, 215 + track_ids: Vec<String>, 216 + ) -> Result<Vec<String>, Error> { 217 + let mut conn = client.get_multiplexed_async_connection().await?; 218 + 219 + let queue: Vec<String> = conn.lrange(format!("user:{}:queue", did), 0, -1).await?; 220 + 221 + let mut new_queue = queue.clone(); 222 + if position >= new_queue.len() { 223 + new_queue.extend(track_ids); 224 + } else { 225 + for (i, track_id) in track_ids.into_iter().enumerate() { 226 + new_queue.insert(position + i, track_id); 227 + } 228 + } 229 + 230 + let mut pipeline = redis::pipe(); 231 + pipeline 232 + .atomic() 233 + .del(format!("user:{}:queue", did)) 234 + .rpush(format!("user:{}:queue", did), new_queue.clone()) 235 + .query_async::<()>(&mut conn) 236 + .await?; 237 + 238 + Ok(new_queue) 239 + } 240 + 241 + #[cfg(test)] 242 + mod tests { 243 + use super::*; 244 + use anyhow::Error; 245 + use redis::AsyncCommands; 246 + use uuid::Uuid; 247 + 248 + async fn setup_redis() -> redis::Client { 249 + redis::Client::open("redis://localhost:6379/").expect("Failed to create Redis client") 250 + } 251 + 252 + async fn cleanup(client: &redis::Client, did: &str) -> Result<(), Error> { 253 + let mut conn = client.get_multiplexed_async_connection().await?; 254 + conn.del::<_, ()>(format!("user:{}:queue", did)).await?; 255 + Ok(()) 256 + } 257 + 258 + #[tokio::test] 259 + async fn test_add_track() -> Result<(), Error> { 260 + let client = setup_redis().await; 261 + let did = Uuid::new_v4().to_string(); 262 + let track_id = "track:67890"; 263 + 264 + // Add a track 265 + add_track(&client, &did, track_id).await?; 266 + let queue = get_queue(&client, &did).await?; 267 + assert_eq!(queue, 
vec![track_id]); 268 + 269 + // Add another track 270 + let track_id2 = "track:67891"; 271 + add_track(&client, &did, track_id2).await?; 272 + let queue = get_queue(&client, &did).await?; 273 + assert_eq!(queue, vec![track_id, track_id2]); 274 + 275 + // Cleanup 276 + cleanup(&client, &did).await?; 277 + Ok(()) 278 + } 279 + 280 + #[tokio::test] 281 + async fn test_insert_track_at() -> Result<(), Error> { 282 + let client = setup_redis().await; 283 + let did = Uuid::new_v4().to_string(); 284 + let track_ids = vec!["track:67890", "track:67891", "track:67892"]; 285 + 286 + for &track_id in &track_ids { 287 + add_track(&client, &did, track_id).await?; 288 + } 289 + 290 + let new_track = "track:67893"; 291 + insert_track_at(&client, &did, 1, new_track).await?; 292 + let queue: Vec<String> = get_queue(&client, &did).await?; 293 + assert_eq!( 294 + queue, 295 + vec!["track:67890", "track:67893", "track:67891", "track:67892"] 296 + ); 297 + 298 + let end_track = "track:67894"; 299 + insert_track_at(&client, &did, 10, end_track).await?; 300 + let queue = get_queue(&client, &did).await?; 301 + assert_eq!( 302 + queue, 303 + vec![ 304 + "track:67890", 305 + "track:67893", 306 + "track:67891", 307 + "track:67892", 308 + "track:67894" 309 + ] 310 + ); 311 + 312 + let new_did = Uuid::new_v4().to_string(); 313 + insert_track_at(&client, &new_did, 0, "track:67895").await?; 314 + let queue = get_queue(&client, &new_did).await?; 315 + assert_eq!(queue, vec!["track:67895"]); 316 + 317 + cleanup(&client, &did).await?; 318 + cleanup(&client, &new_did).await?; 319 + Ok(()) 320 + } 321 + 322 + #[tokio::test] 323 + async fn test_remove_track_at() -> Result<(), Error> { 324 + let client = setup_redis().await; 325 + let did = Uuid::new_v4().to_string(); 326 + let track_ids = vec!["track:67890", "track:67891", "track:67892"]; 327 + 328 + for &track_id in &track_ids { 329 + add_track(&client, &did, track_id).await?; 330 + } 331 + 332 + remove_track_at(&client, &did, 1).await?; 333 + let 
queue = get_queue(&client, &did).await?; 334 + assert_eq!(queue, vec!["track:67890", "track:67892"]); 335 + 336 + remove_track_at(&client, &did, 0).await?; 337 + let queue = get_queue(&client, &did).await?; 338 + assert_eq!(queue, vec!["track:67892"]); 339 + 340 + remove_track_at(&client, &did, 5).await?; 341 + let queue = get_queue(&client, &did).await?; 342 + assert_eq!(queue, vec!["track:67892"]); 343 + 344 + let new_did = Uuid::new_v4().to_string(); 345 + remove_track_at(&client, &new_did, 0).await?; 346 + let queue = get_queue(&client, &new_did).await?; 347 + assert_eq!(queue, Vec::<String>::new()); 348 + 349 + cleanup(&client, &did).await?; 350 + cleanup(&client, &new_did).await?; 351 + Ok(()) 352 + } 353 + 354 + #[tokio::test] 355 + async fn test_shuffle_queue() -> Result<(), Error> { 356 + let client = setup_redis().await; 357 + let did = Uuid::new_v4().to_string(); 358 + let track_ids = vec!["track:67890", "track:67891", "track:67892"]; 359 + 360 + for &track_id in &track_ids { 361 + add_track(&client, &did, track_id).await?; 362 + } 363 + 364 + shuffle_queue(&client, &did).await?; 365 + let queue = get_queue(&client, &did).await?; 366 + assert_eq!(queue.len(), track_ids.len()); 367 + assert!(track_ids.iter().all(|id| queue.contains(&id.to_string()))); 368 + 369 + cleanup(&client, &did).await?; 370 + Ok(()) 371 + } 372 + 373 + #[tokio::test] 374 + async fn test_get_queue() -> Result<(), Error> { 375 + let client = setup_redis().await; 376 + let did = Uuid::new_v4().to_string(); 377 + let track_ids = vec!["track:67890", "track:67891"]; 378 + 379 + let queue = get_queue(&client, &did).await?; 380 + assert_eq!(queue, Vec::<String>::new()); 381 + 382 + for &track_id in &track_ids { 383 + add_track(&client, &did, track_id).await?; 384 + } 385 + 386 + let queue = get_queue(&client, &did).await?; 387 + assert_eq!(queue, track_ids); 388 + 389 + cleanup(&client, &did).await?; 390 + Ok(()) 391 + } 392 + 393 + #[tokio::test] 394 + async fn test_clear_queue() -> 
Result<(), Error> { 395 + let client = setup_redis().await; 396 + let did = Uuid::new_v4().to_string(); 397 + let track_ids = vec!["track:67890", "track:67891"]; 398 + 399 + for &track_id in &track_ids { 400 + add_track(&client, &did, track_id).await?; 401 + } 402 + 403 + clear_queue(&client, &did).await?; 404 + let queue = get_queue(&client, &did).await?; 405 + assert_eq!(queue, Vec::<String>::new()); 406 + 407 + clear_queue(&client, &did).await?; 408 + let queue = get_queue(&client, &did).await?; 409 + assert_eq!(queue, Vec::<String>::new()); 410 + 411 + cleanup(&client, &did).await?; 412 + Ok(()) 413 + } 414 + 415 + #[tokio::test] 416 + async fn test_queue_length_and_empty() -> Result<(), Error> { 417 + let client = setup_redis().await; 418 + let did = Uuid::new_v4().to_string(); 419 + let track_ids = vec!["track:67890", "track:67891"]; 420 + 421 + let length = get_queue_length(&client, &did).await?; 422 + assert_eq!(length, 0); 423 + let is_empty = is_queue_empty(&client, &did).await?; 424 + assert!(is_empty); 425 + 426 + for &track_id in &track_ids { 427 + add_track(&client, &did, track_id).await?; 428 + } 429 + 430 + let length = get_queue_length(&client, &did).await?; 431 + assert_eq!(length, track_ids.len()); 432 + let is_empty = is_queue_empty(&client, &did).await?; 433 + assert!(!is_empty); 434 + 435 + cleanup(&client, &did).await?; 436 + Ok(()) 437 + } 438 + 439 + #[tokio::test] 440 + async fn test_current_track() -> Result<(), Error> { 441 + let client = setup_redis().await; 442 + let did = Uuid::new_v4().to_string(); 443 + let track_ids = vec!["track:67890", "track:67891"]; 444 + for &track_id in &track_ids { 445 + add_track(&client, &did, track_id).await?; 446 + } 447 + let current = get_current_track(&client, &did).await?; 448 + assert_eq!(current, None); 449 + set_current_track(&client, &did, 1).await?; 450 + let current = get_current_track(&client, &did).await?; 451 + assert_eq!(current, Some(1)); 452 + clear_current_track(&client, &did).await?; 
453 + let current = get_current_track(&client, &did).await?; 454 + assert_eq!(current, None); 455 + cleanup(&client, &did).await?; 456 + Ok(()) 457 + } 458 + 459 + #[tokio::test] 460 + async fn test_move_track() -> Result<(), Error> { 461 + let client = setup_redis().await; 462 + let did = Uuid::new_v4().to_string(); 463 + let track_ids = vec!["track:67890", "track:67891", "track:67892"]; 464 + 465 + for &track_id in &track_ids { 466 + add_track(&client, &did, track_id).await?; 467 + } 468 + 469 + move_track(&client, &did, 0, 2).await?; 470 + let queue = get_queue(&client, &did).await?; 471 + assert_eq!(queue, vec!["track:67891", "track:67892", "track:67890"]); 472 + move_track(&client, &did, 2, 0).await?; 473 + 474 + let queue = get_queue(&client, &did).await?; 475 + assert_eq!(queue, vec!["track:67890", "track:67891", "track:67892"]); 476 + move_track(&client, &did, 1, 1).await?; 477 + 478 + let queue = get_queue(&client, &did).await?; 479 + assert_eq!(queue, vec!["track:67890", "track:67891", "track:67892"]); 480 + move_track(&client, &did, 5, 0).await?; 481 + 482 + let queue = get_queue(&client, &did).await?; 483 + assert_eq!(queue, vec!["track:67890", "track:67891", "track:67892"]); 484 + 485 + let new_did = Uuid::new_v4().to_string(); 486 + move_track(&client, &new_did, 0, 1).await?; 487 + 488 + let queue = get_queue(&client, &new_did).await?; 489 + assert_eq!(queue, Vec::<String>::new()); 490 + 491 + cleanup(&client, &did).await?; 492 + cleanup(&client, &new_did).await?; 493 + 494 + Ok(()) 495 + } 496 + 497 + #[tokio::test] 498 + async fn test_replace_queue() -> Result<(), Error> { 499 + let client = setup_redis().await; 500 + let did = Uuid::new_v4().to_string(); 501 + let initial_tracks = vec!["track:67890", "track:67891"]; 502 + 503 + for &track_id in &initial_tracks { 504 + add_track(&client, &did, track_id).await?; 505 + } 506 + 507 + let new_queue = vec![ 508 + "track:67892".to_string(), 509 + "track:67893".to_string(), 510 + "track:67894".to_string(), 
511 + ]; 512 + 513 + replace_queue(&client, &did, new_queue.clone()).await?; 514 + let queue = get_queue(&client, &did).await?; 515 + 516 + assert_eq!(queue, new_queue); 517 + cleanup(&client, &did).await?; 518 + Ok(()) 519 + } 520 + 521 + #[tokio::test] 522 + async fn test_get_track_at() -> Result<(), Error> { 523 + let client = setup_redis().await; 524 + let did = Uuid::new_v4().to_string(); 525 + let track_ids = vec!["track:67890", "track:67891", "track:67892"]; 526 + 527 + for &track_id in &track_ids { 528 + add_track(&client, &did, track_id).await?; 529 + } 530 + 531 + let track = get_track_at(&client, &did, 1).await?; 532 + assert_eq!(track, Some("track:67891".to_string())); 533 + 534 + let track = get_track_at(&client, &did, 5).await?; 535 + assert_eq!(track, None); 536 + 537 + let new_did = Uuid::new_v4().to_string(); 538 + let track = get_track_at(&client, &new_did, 0).await?; 539 + assert_eq!(track, None); 540 + 541 + cleanup(&client, &did).await?; 542 + cleanup(&client, &new_did).await?; 543 + 544 + Ok(()) 545 + } 546 + 547 + #[tokio::test] 548 + async fn test_insert_tracks_at() -> Result<(), Error> { 549 + let client = setup_redis().await; 550 + let did = Uuid::new_v4().to_string(); 551 + let initial_tracks = vec!["track:67890", "track:67891"]; 552 + 553 + for &track_id in &initial_tracks { 554 + add_track(&client, &did, track_id).await?; 555 + } 556 + 557 + let new_tracks = vec!["track:67892".to_string(), "track:67893".to_string()]; 558 + insert_tracks_at(&client, &did, 1, new_tracks.clone()).await?; 559 + let queue = get_queue(&client, &did).await?; 560 + 561 + assert_eq!( 562 + queue, 563 + vec!["track:67890", "track:67892", "track:67893", "track:67891"] 564 + ); 565 + 566 + let end_tracks = vec!["track:67894".to_string()]; 567 + insert_tracks_at(&client, &did, 10, end_tracks.clone()).await?; 568 + let queue = get_queue(&client, &did).await?; 569 + assert_eq!( 570 + queue, 571 + vec![ 572 + "track:67890", 573 + "track:67892", 574 + "track:67893", 575 
+ "track:67891", 576 + "track:67894" 577 + ] 578 + ); 579 + let new_did = Uuid::new_v4().to_string(); 580 + let new_tracks = vec!["track:67895".to_string(), "track:67896".to_string()]; 581 + insert_tracks_at(&client, &new_did, 0, new_tracks.clone()).await?; 582 + 583 + let queue = get_queue(&client, &new_did).await?; 584 + assert_eq!(queue, new_tracks); 585 + 586 + cleanup(&client, &did).await?; 587 + cleanup(&client, &new_did).await?; 588 + Ok(()) 589 + } 590 + 591 + #[tokio::test] 592 + async fn test_concurrent_operations() -> Result<(), Error> { 593 + let client = setup_redis().await; 594 + let did = Uuid::new_v4().to_string(); 595 + let track_ids = vec!["track:67890", "track:67891", "track:67892"]; 596 + 597 + let add_task = add_track(&client, &did, track_ids[0]); 598 + let insert_task = insert_track_at(&client, &did, 0, track_ids[1]); 599 + let remove_task = remove_track_at(&client, &did, 0); 600 + tokio::try_join!(add_task, insert_task, remove_task)?; 601 + 602 + let queue = get_queue(&client, &did).await?; 603 + assert!(queue.len() <= 2); 604 + assert!(track_ids.iter().any(|id| queue.contains(&id.to_string()))); 605 + 606 + cleanup(&client, &did).await?; 607 + Ok(()) 608 + } 609 + }
+60
crates/tracklist/src/server.rs
··· 1 + use std::{env, sync::Arc}; 2 + 3 + use actix_web::{ 4 + get, post, 5 + web::{self, Data}, 6 + App, HttpRequest, HttpResponse, HttpServer, Responder, 7 + }; 8 + use anyhow::Error; 9 + use owo_colors::OwoColorize; 10 + use serde_json::json; 11 + 12 + use crate::handlers::handle; 13 + 14 + #[get("/")] 15 + async fn index(_req: HttpRequest) -> HttpResponse { 16 + HttpResponse::Ok().json(json!({ 17 + "server": "Rocksky Tracklist Server", 18 + "version": "0.1.0", 19 + })) 20 + } 21 + 22 + #[post("/{method}")] 23 + async fn call_method( 24 + data: web::Data<Arc<redis::Client>>, 25 + mut payload: web::Payload, 26 + req: HttpRequest, 27 + ) -> Result<impl Responder, actix_web::Error> { 28 + let method = req.match_info().get("method").unwrap_or("unknown"); 29 + println!("Method: {}", method.bright_green()); 30 + 31 + let conn = data.get_ref().clone(); 32 + handle(method, &mut payload, &req, conn) 33 + .await 34 + .map_err(actix_web::error::ErrorInternalServerError) 35 + } 36 + 37 + pub async fn run() -> Result<(), Error> { 38 + let host = env::var("TRACKLIST_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); 39 + let port = env::var("TRACKLIST_PORT").unwrap_or_else(|_| "7884".to_string()); 40 + let addr = format!("{}:{}", host, port); 41 + 42 + let url = format!("http://{}", addr); 43 + println!("Listening on {}", url.bright_green()); 44 + 45 + let client = redis::Client::open(env::var("REDIS_URL").unwrap_or("redis://127.0.0.1".into()))?; 46 + let conn = Arc::new(client); 47 + 48 + HttpServer::new(move || { 49 + App::new() 50 + .app_data(Data::new(conn.clone())) 51 + .service(index) 52 + .service(call_method) 53 + }) 54 + .bind(&addr)? 55 + .run() 56 + .await 57 + .map_err(Error::new)?; 58 + 59 + Ok(()) 60 + }
+87
crates/tracklist/src/types.rs
use serde::{Deserialize, Serialize};

/// Append a single track to the end of a user's queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct AddTrackParams {
    pub did: String,
    pub track_id: String,
}

/// Insert a single track at `index` (positions past the end append).
#[derive(Debug, Serialize, Deserialize)]
pub struct InsertTrackAtParams {
    pub did: String,
    pub track_id: String,
    pub index: usize,
}

/// Remove the track at `index` from a user's queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct RemoveTrackAtParams {
    pub did: String,
    pub index: usize,
}

/// Shuffle a user's queue in place.
#[derive(Debug, Serialize, Deserialize)]
pub struct ShuffleQueueParams {
    pub did: String,
}

/// Fetch the full contents of a user's queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct GetQueueParams {
    pub did: String,
}

/// Delete every track in a user's queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct ClearQueueParams {
    pub did: String,
}

/// Get the number of tracks in a user's queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct GetQueueLengthParams {
    pub did: String,
}

/// Check whether a user's queue has no tracks.
#[derive(Debug, Serialize, Deserialize)]
pub struct IsQueueEmptyParams {
    pub did: String,
}

/// Set the "current track" pointer to `index` within the queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct SetCurrentTrackParams {
    pub did: String,
    pub index: usize,
}

/// Read the "current track" pointer, if one is set.
#[derive(Debug, Serialize, Deserialize)]
pub struct GetCurrentTrackParams {
    pub did: String,
}

/// Remove the "current track" pointer.
#[derive(Debug, Serialize, Deserialize)]
pub struct ClearCurrentTrackParams {
    pub did: String,
}

/// Move the track at `from` to position `to` within the queue.
#[derive(Debug, Serialize, Deserialize)]
pub struct MoveTrackParams {
    pub did: String,
    pub from: usize,
    pub to: usize,
}

/// Replace the entire queue with `track_ids`.
#[derive(Debug, Serialize, Deserialize)]
pub struct ReplaceQueueParams {
    pub did: String,
    pub track_ids: Vec<String>,
}

/// Read the track id stored at `index`, if any.
#[derive(Debug, Serialize, Deserialize)]
pub struct GetTrackAtParams {
    pub did: String,
    pub index: usize,
}

/// Insert several tracks, in order, starting at `index`.
#[derive(Debug, Serialize, Deserialize)]
pub struct InsertTracksAtParams {
    pub did: String,
    pub track_ids: Vec<String>,
    pub index: usize,
}
+1 -1
crates/webscrobbler/Cargo.toml
··· 1 1 [package] 2 - name = "webscrobbler" 2 + name = "rocksky-webscrobbler" 3 3 version = "0.1.0" 4 4 authors.workspace = true 5 5 edition.workspace = true
+10
crates/webscrobbler/src/consts.rs
/// ASCII-art startup banner printed (in magenta) when the webscrobbler
/// service boots. The raw string is emitted verbatim, trailing tagline
/// included.
// NOTE(review): the art's interior spacing was reconstructed from a
// whitespace-mangled source — verify the rendering against the original.
pub const BANNER: &str = r#"
 _       __     __   _____                 __    __    __
| |     / /__  / /_ / ___/______________  / /_  / /_  / /__  _____
| | /| / / _ \/ __ \\__ \/ ___/ ___/ __ \/ __ \/ __ \/ / _ \/ ___/
| |/ |/ /  __/ /_/ /__/ / /__/ /  / /_/ / /_/ / /_/ / /  __/ /
|__/|__/\___/_.___/____/\___/_/   \____/_.___/_.___/_/\___/_/


This is the Rocksky WebScrobbler Webhook API compatible with webscrobbler extension.
"#;
+1 -1
crates/webscrobbler/src/handlers.rs
··· 1 - use crate::{cache::Cache, repo, scrobbler::scrobble, types::ScrobbleRequest, BANNER}; 1 + use crate::{cache::Cache, consts::BANNER, repo, scrobbler::scrobble, types::ScrobbleRequest}; 2 2 use actix_web::{get, post, web, HttpRequest, HttpResponse, Responder}; 3 3 use owo_colors::OwoColorize; 4 4 use sqlx::{Pool, Postgres};
+78
crates/webscrobbler/src/lib.rs
··· 1 + use std::{env, sync::Arc, time::Duration}; 2 + 3 + use actix_limitation::{Limiter, RateLimiter}; 4 + use actix_session::SessionExt; 5 + use actix_web::{ 6 + dev::ServiceRequest, 7 + web::{self, Data}, 8 + App, HttpServer, 9 + }; 10 + use anyhow::Error; 11 + use owo_colors::OwoColorize; 12 + use sqlx::postgres::PgPoolOptions; 13 + 14 + use crate::{cache::Cache, consts::BANNER}; 15 + 16 + pub mod auth; 17 + pub mod cache; 18 + pub mod consts; 19 + pub mod crypto; 20 + pub mod handlers; 21 + pub mod musicbrainz; 22 + pub mod repo; 23 + pub mod rocksky; 24 + pub mod scrobbler; 25 + pub mod spotify; 26 + pub mod types; 27 + pub mod xata; 28 + 29 + pub async fn start_server() -> Result<(), Error> { 30 + println!("{}", BANNER.magenta()); 31 + 32 + let cache = Cache::new()?; 33 + 34 + let pool = PgPoolOptions::new() 35 + .max_connections(5) 36 + .connect(&env::var("XATA_POSTGRES_URL")?) 37 + .await?; 38 + 39 + let conn = Arc::new(pool); 40 + 41 + let host = env::var("WEBSCROBBLER_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); 42 + let port = env::var("WEBSCROBBLER_PORT") 43 + .unwrap_or_else(|_| "7883".to_string()) 44 + .parse::<u16>() 45 + .unwrap_or(7883); 46 + 47 + println!( 48 + "Starting WebScrobbler Webhook @ {}", 49 + format!("{}:{}", host, port).green() 50 + ); 51 + 52 + let limiter = web::Data::new( 53 + Limiter::builder("redis://127.0.0.1") 54 + .key_by(|req: &ServiceRequest| { 55 + req.get_session() 56 + .get(&"session-id") 57 + .unwrap_or_else(|_| req.cookie(&"rate-api-id").map(|c| c.to_string())) 58 + }) 59 + .limit(100) 60 + .period(Duration::from_secs(60)) // 60 minutes 61 + .build() 62 + .unwrap(), 63 + ); 64 + 65 + HttpServer::new(move || { 66 + App::new() 67 + .wrap(RateLimiter::default()) 68 + .app_data(limiter.clone()) 69 + .app_data(Data::new(conn.clone())) 70 + .app_data(Data::new(cache.clone())) 71 + .service(handlers::index) 72 + .service(handlers::handle_scrobble) 73 + }) 74 + .bind((host, port))? 
75 + .run() 76 + .await?; 77 + Ok(()) 78 + }
+2 -82
crates/webscrobbler/src/main.rs
··· 1 - use std::{env, sync::Arc, time::Duration}; 2 - 3 - use actix_limitation::{Limiter, RateLimiter}; 4 - use actix_session::SessionExt as _; 5 - use actix_web::{ 6 - dev::ServiceRequest, 7 - web::{self, Data}, 8 - App, HttpServer, 9 - }; 10 1 use anyhow::Error; 11 - use cache::Cache; 12 2 use dotenv::dotenv; 13 - use owo_colors::OwoColorize; 14 - use sqlx::postgres::PgPoolOptions; 15 - 16 - pub mod auth; 17 - pub mod cache; 18 - pub mod crypto; 19 - pub mod handlers; 20 - pub mod musicbrainz; 21 - pub mod repo; 22 - pub mod rocksky; 23 - pub mod scrobbler; 24 - pub mod spotify; 25 - pub mod types; 26 - pub mod xata; 27 - 28 - pub const BANNER: &str = r#" 29 - _ __ __ _____ __ __ __ 30 - | | / /__ / /_ / ___/______________ / /_ / /_ / /__ _____ 31 - | | /| / / _ \/ __ \\__ \/ ___/ ___/ __ \/ __ \/ __ \/ / _ \/ ___/ 32 - | |/ |/ / __/ /_/ /__/ / /__/ / / /_/ / /_/ / /_/ / / __/ / 33 - |__/|__/\___/_.___/____/\___/_/ \____/_.___/_.___/_/\___/_/ 34 - 35 - 36 - This is the Rocksky WebScrobbler Webhook API compatible with webscrobbler extension. 37 - "#; 3 + use rocksky_webscrobbler::start_server; 38 4 39 5 #[tokio::main] 40 6 async fn main() -> Result<(), Error> { 41 7 dotenv().ok(); 42 8 43 - println!("{}", BANNER.magenta()); 44 - 45 - let cache = Cache::new()?; 46 - 47 - let pool = PgPoolOptions::new() 48 - .max_connections(5) 49 - .connect(&env::var("XATA_POSTGRES_URL")?) 
50 - .await?; 51 - 52 - let conn = Arc::new(pool); 53 - 54 - let host = env::var("WEBSCROBBLER_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); 55 - let port = env::var("WEBSCROBBLER_PORT") 56 - .unwrap_or_else(|_| "7883".to_string()) 57 - .parse::<u16>() 58 - .unwrap_or(7883); 59 - 60 - println!( 61 - "Starting WebScrobbler Webhook @ {}", 62 - format!("{}:{}", host, port).green() 63 - ); 64 - 65 - let limiter = web::Data::new( 66 - Limiter::builder("redis://127.0.0.1") 67 - .key_by(|req: &ServiceRequest| { 68 - req.get_session() 69 - .get(&"session-id") 70 - .unwrap_or_else(|_| req.cookie(&"rate-api-id").map(|c| c.to_string())) 71 - }) 72 - .limit(100) 73 - .period(Duration::from_secs(60)) // 60 minutes 74 - .build() 75 - .unwrap(), 76 - ); 77 - 78 - HttpServer::new(move || { 79 - App::new() 80 - .wrap(RateLimiter::default()) 81 - .app_data(limiter.clone()) 82 - .app_data(Data::new(conn.clone())) 83 - .app_data(Data::new(cache.clone())) 84 - .service(handlers::index) 85 - .service(handlers::handle_scrobble) 86 - }) 87 - .bind((host, port))? 88 - .run() 89 - .await?; 9 + start_server().await?; 90 10 91 11 Ok(()) 92 12 }