audio streaming app plyr.fm

perf: fix slow homepage load times (#1025)

* perf: fix slow homepage load times with SWR caching and pool warmup

follow graph: stale-while-revalidate pattern (TTL 60min, stale at 8min)
returns cached data immediately, schedules background re-warm when stale.
removes redundant login-time cache warming from auth paths.

track listing: cache anonymous first-page discovery feed in Redis (60s TTL)
with invalidation on upload, delete, and edit.

connection pool: warm one connection at startup to eliminate cold connect
penalty on first request after deploy.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove stale schedule_follow_graph_warm mock from test

the import was removed from auth.py, so the test mock path no longer exists.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: narrow bare except Exception to specific types

- Redis operations: catch (RuntimeError, RedisError) instead of Exception
- DB pool warmup: catch (OSError, SQLAlchemyError) instead of Exception
- Move deferred imports in main.py to top level
- Update tests to use redis.exceptions.ConnectionError

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>

authored by zzstoatzz.io and Claude Opus 4.6, committed by GitHub
5153d514 4dd6ed45

+315 -43
+44 -8
backend/src/backend/_internal/follow_graph.py
··· 1 - """bluesky follow graph with Redis read-through caching.""" 1 + """bluesky follow graph with Redis read-through caching and stale-while-revalidate.""" 2 2 3 3 import json 4 4 import logging 5 + import time 5 6 from dataclasses import asdict, dataclass 6 7 7 8 import httpx 8 9 import logfire 10 + from redis.exceptions import RedisError 9 11 10 12 from backend._internal.atproto.profile import BSKY_API_BASE, normalize_avatar_url 11 13 from backend.utilities.redis import get_async_redis_client ··· 13 15 logger = logging.getLogger(__name__) 14 16 15 17 FOLLOWS_CACHE_PREFIX = "plyr:follows:" 16 - FOLLOWS_CACHE_TTL_SECONDS = 600 # 10 minutes 18 + FOLLOWS_TIMESTAMP_PREFIX = "plyr:follows:ts:" 19 + FOLLOWS_REVALIDATING_PREFIX = "plyr:follows:revalidating:" 20 + FOLLOWS_CACHE_TTL_SECONDS = 3600 # 60 minutes 21 + FOLLOWS_STALE_AFTER_SECONDS = 480 # 8 minutes 17 22 18 23 19 24 @dataclass(frozen=True, slots=True) ··· 27 32 async def get_follows(did: str) -> dict[str, FollowInfo]: 28 33 """get all DIDs a user follows on bluesky, with Redis read-through cache. 29 34 30 - checks Redis first, falls back to live Bluesky API on miss, 31 - then writes back to cache. fails silently on Redis errors. 35 + uses stale-while-revalidate: returns cached data immediately even if stale, 36 + and schedules a background re-warm when data is older than FOLLOWS_STALE_AFTER_SECONDS. 37 + on cache miss, fetches live from Bluesky and writes back. fails silently on Redis errors. 
32 38 """ 33 39 cache_key = f"{FOLLOWS_CACHE_PREFIX}{did}" 40 + ts_key = f"{FOLLOWS_TIMESTAMP_PREFIX}{did}" 34 41 35 42 # try cache 36 43 try: 37 44 redis = get_async_redis_client() 38 45 if cached := await redis.get(cache_key): 39 - return _deserialize_follows(cached) 40 - except Exception: 46 + follows = _deserialize_follows(cached) 47 + 48 + # check staleness — schedule background re-warm if stale 49 + try: 50 + ts_raw = await redis.get(ts_key) 51 + if ts_raw and time.time() - float(ts_raw) > FOLLOWS_STALE_AFTER_SECONDS: 52 + await _maybe_schedule_revalidation(did) 53 + except (RuntimeError, RedisError): 54 + logger.debug("redis staleness check failed for follows %s", did) 55 + 56 + return follows 57 + except (RuntimeError, RedisError): 41 58 logger.debug("redis cache read failed for follows %s", did) 42 59 43 60 # cache miss — fetch live 44 61 follows = await _fetch_follows_from_bsky(did) 45 62 46 - # write back 63 + # write back with timestamp 47 64 try: 48 65 redis = get_async_redis_client() 49 66 await redis.set( 50 67 cache_key, _serialize_follows(follows), ex=FOLLOWS_CACHE_TTL_SECONDS 51 68 ) 52 - except Exception: 69 + await redis.set(ts_key, str(time.time()), ex=FOLLOWS_CACHE_TTL_SECONDS) 70 + except (RuntimeError, RedisError): 53 71 logger.debug("redis cache write failed for follows %s", did) 54 72 55 73 return follows 56 74 57 75 76 + async def _maybe_schedule_revalidation(did: str) -> None: 77 + """schedule a background re-warm if no revalidation is already in progress. 78 + 79 + uses SET NX on a revalidating key to dedup concurrent requests. 
80 + """ 81 + revalidating_key = f"{FOLLOWS_REVALIDATING_PREFIX}{did}" 82 + try: 83 + redis = get_async_redis_client() 84 + if await redis.set(revalidating_key, "1", nx=True, ex=60): 85 + from backend._internal.tasks import schedule_follow_graph_warm 86 + 87 + await schedule_follow_graph_warm(did) 88 + except (RuntimeError, RedisError): 89 + logger.debug("failed to schedule revalidation for follows %s", did) 90 + 91 + 58 92 async def warm_follows_cache(did: str) -> None: 59 93 """always fetch from bluesky and write to Redis. called from background task.""" 60 94 follows = await _fetch_follows_from_bsky(did) 61 95 cache_key = f"{FOLLOWS_CACHE_PREFIX}{did}" 96 + ts_key = f"{FOLLOWS_TIMESTAMP_PREFIX}{did}" 62 97 try: 63 98 redis = get_async_redis_client() 64 99 await redis.set( 65 100 cache_key, _serialize_follows(follows), ex=FOLLOWS_CACHE_TTL_SECONDS 66 101 ) 102 + await redis.set(ts_key, str(time.time()), ex=FOLLOWS_CACHE_TTL_SECONDS) 67 103 logfire.info("warmed follows cache", did=did, count=len(follows)) 68 104 except Exception: 69 105 logger.debug("redis cache write failed warming follows for %s", did)
+1 -4
backend/src/backend/api/auth.py
··· 39 39 switch_active_account, 40 40 ) 41 41 from backend._internal.auth import get_refresh_token_lifetime_days 42 - from backend._internal.tasks import schedule_atproto_sync, schedule_follow_graph_warm 42 + from backend._internal.tasks import schedule_atproto_sync 43 43 from backend.config import settings 44 44 from backend.models import Artist, get_db 45 45 from backend.utilities.rate_limit import limiter ··· 249 249 250 250 # schedule ATProto sync (via docket if enabled, else asyncio) 251 251 await schedule_atproto_sync(session_id, did) 252 - await schedule_follow_graph_warm(did) 253 252 254 253 return RedirectResponse( 255 254 url=f"{settings.frontend.url}/settings?exchange_token={exchange_token}&scope_upgraded=true", ··· 276 275 277 276 # schedule ATProto sync 278 277 await schedule_atproto_sync(session_id, did) 279 - await schedule_follow_graph_warm(did) 280 278 281 279 return RedirectResponse( 282 280 url=f"{settings.frontend.url}/portal?exchange_token={exchange_token}&account_added=true", ··· 294 292 295 293 # schedule ATProto sync (via docket if enabled, else asyncio) 296 294 await schedule_atproto_sync(session_id, did) 297 - await schedule_follow_graph_warm(did) 298 295 299 296 # redirect to profile setup if needed, otherwise to portal 300 297 redirect_path = "/portal" if has_profile else "/profile/setup"
+2 -1
backend/src/backend/api/tracks/constants.py
··· 2 2 3 3 MAX_FEATURES = 5 4 4 5 - __all__ = ["MAX_FEATURES"] 5 + DISCOVERY_CACHE_KEY = "plyr:tracks:discovery" 6 + DISCOVERY_CACHE_TTL_SECONDS = 60
+41 -1
backend/src/backend/api/tracks/listing.py
··· 1 1 """Read-only track listing endpoints.""" 2 2 3 3 import asyncio 4 + import logging 4 5 from datetime import datetime 5 6 from typing import TYPE_CHECKING, Annotated, cast 6 7 ··· 8 9 from botocore.exceptions import ClientError 9 10 from fastapi import Depends, HTTPException, Query 10 11 from pydantic import BaseModel 12 + from redis.exceptions import RedisError 11 13 from sqlalchemy import func, select 12 14 from sqlalchemy.ext.asyncio import AsyncSession 13 15 from sqlalchemy.orm import selectinload ··· 34 36 get_top_tracks_with_counts, 35 37 get_track_tags, 36 38 ) 39 + from backend.utilities.redis import get_async_redis_client 37 40 from backend.utilities.tags import DEFAULT_HIDDEN_TAGS 38 41 42 + from .constants import DISCOVERY_CACHE_KEY, DISCOVERY_CACHE_TTL_SECONDS 39 43 from .router import router 40 44 45 + logger = logging.getLogger(__name__) 46 + 47 + 48 + async def invalidate_tracks_discovery_cache() -> None: 49 + """delete the anonymous discovery feed cache key.""" 50 + try: 51 + redis = get_async_redis_client() 52 + await redis.delete(DISCOVERY_CACHE_KEY) 53 + except (RuntimeError, RedisError): 54 + logger.debug("failed to invalidate discovery cache") 55 + 56 + 41 57 if TYPE_CHECKING: 42 58 from backend.storage.r2 import R2Storage 43 59 ··· 80 96 Pass this to get the next page of results. 81 97 limit: Maximum number of tracks to return (default from settings, max 100). 
82 98 """ 99 + # anonymous first-page discovery feed — serve from cache if available 100 + is_cacheable = session is None and artist_did is None and cursor is None 101 + if is_cacheable: 102 + try: 103 + redis = get_async_redis_client() 104 + if cached := await redis.get(DISCOVERY_CACHE_KEY): 105 + return TracksListResponse.model_validate_json(cached) 106 + except (RuntimeError, RedisError): 107 + logger.debug("discovery cache read failed") 108 + 83 109 # use settings default if not provided, clamp to reasonable bounds 84 110 if limit is None: 85 111 limit = settings.app.default_page_size ··· 280 306 ] 281 307 ) 282 308 283 - return TracksListResponse( 309 + response = TracksListResponse( 284 310 tracks=list(track_responses), 285 311 next_cursor=next_cursor, 286 312 has_more=has_more, 287 313 ) 314 + 315 + # write back to cache for anonymous first-page requests 316 + if is_cacheable: 317 + try: 318 + redis = get_async_redis_client() 319 + await redis.set( 320 + DISCOVERY_CACHE_KEY, 321 + response.model_dump_json(), 322 + ex=DISCOVERY_CACHE_TTL_SECONDS, 323 + ) 324 + except (RuntimeError, RedisError): 325 + logger.debug("discovery cache write failed") 326 + 327 + return response 288 328 289 329 290 330 @router.get("/top")
+7
backend/src/backend/api/tracks/mutations.py
··· 38 38 from backend.storage import storage 39 39 from backend.utilities.tags import get_or_create_tag, parse_tags_json 40 40 41 + from .listing import invalidate_tracks_discovery_cache 41 42 from .metadata_service import ( 42 43 apply_album_update, 43 44 resolve_feature_handles, ··· 125 126 # delete track record 126 127 await db.delete(track) 127 128 await db.commit() 129 + 130 + # invalidate anonymous discovery feed cache 131 + await invalidate_tracks_discovery_cache() 128 132 129 133 # sync album list record if track was in an album 130 134 if album_id_to_sync: ··· 307 311 308 312 await db.commit() 309 313 await db.refresh(track) 314 + 315 + # invalidate anonymous discovery feed cache 316 + await invalidate_tracks_discovery_cache() 310 317 311 318 # invalidate album cache if track metadata changed within an album 312 319 from backend.api.albums import invalidate_album_cache_by_id
+4
backend/src/backend/api/tracks/uploads.py
··· 59 59 from backend.utilities.rate_limit import limiter 60 60 from backend.utilities.tags import add_tags_to_track, parse_tags_json 61 61 62 + from .listing import invalidate_tracks_discovery_cache 62 63 from .router import router 63 64 from .services import get_or_create_album 64 65 ··· 816 817 817 818 # phase 7: post-upload tasks (tags, notifications, background jobs) 818 819 await _schedule_post_upload(ctx, sr, track) 820 + 821 + # invalidate anonymous discovery feed cache 822 + await invalidate_tracks_discovery_cache() 819 823 820 824 await job_service.update_progress( 821 825 ctx.upload_id,
+12
backend/src/backend/main.py
··· 11 11 from slowapi import _rate_limit_exceeded_handler 12 12 from slowapi.errors import RateLimitExceeded 13 13 from slowapi.middleware import SlowAPIMiddleware 14 + from sqlalchemy import text 15 + from sqlalchemy.exc import SQLAlchemyError 14 16 15 17 from backend._internal import jam_service, notification_service, queue_service 16 18 from backend._internal.background import background_worker_lifespan ··· 39 41 from backend.api.lists import router as lists_router 40 42 from backend.api.migration import router as migration_router 41 43 from backend.config import settings 44 + from backend.utilities.database import get_engine 42 45 from backend.utilities.middleware import SecurityHeadersMiddleware 43 46 from backend.utilities.observability import ( 44 47 configure_observability, ··· 63 66 await notification_service.setup() 64 67 await queue_service.setup() 65 68 await jam_service.setup() 69 + 70 + # warm the database connection pool so the first request avoids cold connect 71 + try: 72 + engine = get_engine() 73 + async with engine.connect() as conn: 74 + await conn.execute(text("SELECT 1")) 75 + logger.info("database connection pool warmed") 76 + except (OSError, SQLAlchemyError): 77 + logger.warning("failed to warm database connection pool") 66 78 67 79 # start background task worker (docket) 68 80 async with background_worker_lifespan() as docket:
+156 -27
backend/tests/_internal/test_follow_graph.py
··· 1 1 """tests for bluesky follow graph caching.""" 2 2 3 + import time 3 4 from unittest.mock import AsyncMock, patch 4 5 5 6 import pytest 7 + from redis.exceptions import ConnectionError as RedisConnectionError 6 8 7 9 from backend._internal.follow_graph import ( 8 10 FOLLOWS_CACHE_PREFIX, 9 11 FOLLOWS_CACHE_TTL_SECONDS, 12 + FOLLOWS_REVALIDATING_PREFIX, 13 + FOLLOWS_STALE_AFTER_SECONDS, 14 + FOLLOWS_TIMESTAMP_PREFIX, 10 15 FollowInfo, 11 16 _deserialize_follows, 12 17 _serialize_follows, ··· 19 24 "did:plc:bob": FollowInfo(index=1, avatar_url=None), 20 25 } 21 26 27 + TEST_DID = "did:plc:test" 28 + 22 29 23 30 def test_serialization_roundtrip() -> None: 24 31 """serialize -> deserialize preserves data.""" ··· 36 43 def mock_redis() -> AsyncMock: 37 44 redis = AsyncMock() 38 45 redis.get = AsyncMock(return_value=None) 39 - redis.set = AsyncMock() 46 + redis.set = AsyncMock(return_value=True) 40 47 return redis 41 48 42 49 ··· 45 52 return AsyncMock(return_value=SAMPLE_FOLLOWS) 46 53 47 54 55 + def _make_redis_getter(cache_data: str | None = None, timestamp: str | None = None): 56 + """create a side_effect for redis.get that returns different values per key.""" 57 + cache_key = f"{FOLLOWS_CACHE_PREFIX}{TEST_DID}" 58 + ts_key = f"{FOLLOWS_TIMESTAMP_PREFIX}{TEST_DID}" 59 + 60 + async def _get(key: str) -> str | None: 61 + if key == cache_key: 62 + return cache_data 63 + if key == ts_key: 64 + return timestamp 65 + return None 66 + 67 + return _get 68 + 69 + 48 70 async def test_cache_hit(mock_redis: AsyncMock) -> None: 49 - """returns cached data without calling bluesky.""" 50 - mock_redis.get.return_value = _serialize_follows(SAMPLE_FOLLOWS) 71 + """returns cached data without calling bluesky (fresh cache).""" 72 + mock_redis.get.side_effect = _make_redis_getter( 73 + cache_data=_serialize_follows(SAMPLE_FOLLOWS), 74 + timestamp=str(time.time()), # fresh 75 + ) 51 76 52 77 with ( 53 78 patch( ··· 56 81 ), 57 82 patch( 58 83 
"backend._internal.follow_graph._fetch_follows_from_bsky", 59 - ) as mock_fetch, 84 + ) as mock_bsky, 60 85 ): 61 - result = await get_follows("did:plc:test") 86 + result = await get_follows(TEST_DID) 62 87 63 88 assert result == SAMPLE_FOLLOWS 64 - mock_fetch.assert_not_called() 89 + mock_bsky.assert_not_called() 65 90 66 91 67 92 async def test_cache_miss_fetches_and_writes( 68 93 mock_redis: AsyncMock, mock_fetch: AsyncMock 69 94 ) -> None: 70 - """on miss, fetches from bluesky and writes back to redis.""" 95 + """on miss, fetches from bluesky and writes data + timestamp to redis.""" 71 96 with ( 72 97 patch( 73 98 "backend._internal.follow_graph.get_async_redis_client", ··· 78 103 mock_fetch, 79 104 ), 80 105 ): 81 - result = await get_follows("did:plc:test") 106 + result = await get_follows(TEST_DID) 82 107 83 108 assert result == SAMPLE_FOLLOWS 84 - mock_fetch.assert_awaited_once_with("did:plc:test") 109 + mock_fetch.assert_awaited_once_with(TEST_DID) 85 110 86 - # verify cache write 87 - mock_redis.set.assert_awaited_once() 88 - call_args = mock_redis.set.call_args 89 - assert call_args[0][0] == f"{FOLLOWS_CACHE_PREFIX}did:plc:test" 90 - assert call_args[1]["ex"] == FOLLOWS_CACHE_TTL_SECONDS 111 + # verify both data and timestamp were written 112 + assert mock_redis.set.await_count == 2 91 113 92 - # verify written data roundtrips 93 - written = _deserialize_follows(call_args[0][1]) 94 - assert written == SAMPLE_FOLLOWS 114 + data_call = mock_redis.set.call_args_list[0] 115 + assert data_call[0][0] == f"{FOLLOWS_CACHE_PREFIX}{TEST_DID}" 116 + assert data_call[1]["ex"] == FOLLOWS_CACHE_TTL_SECONDS 117 + assert _deserialize_follows(data_call[0][1]) == SAMPLE_FOLLOWS 118 + 119 + ts_call = mock_redis.set.call_args_list[1] 120 + assert ts_call[0][0] == f"{FOLLOWS_TIMESTAMP_PREFIX}{TEST_DID}" 121 + assert ts_call[1]["ex"] == FOLLOWS_CACHE_TTL_SECONDS 95 122 96 123 97 124 async def test_redis_error_falls_back_to_live(mock_fetch: AsyncMock) -> None: 98 125 """redis 
errors fall back to live fetch without raising.""" 99 126 broken_redis = AsyncMock() 100 - broken_redis.get.side_effect = ConnectionError("redis down") 101 - broken_redis.set.side_effect = ConnectionError("redis down") 127 + broken_redis.get.side_effect = RedisConnectionError("redis down") 128 + broken_redis.set.side_effect = RedisConnectionError("redis down") 102 129 103 130 with ( 104 131 patch( ··· 110 137 mock_fetch, 111 138 ), 112 139 ): 113 - result = await get_follows("did:plc:test") 140 + result = await get_follows(TEST_DID) 114 141 115 142 assert result == SAMPLE_FOLLOWS 116 143 mock_fetch.assert_awaited_once() ··· 119 146 async def test_warm_writes_to_redis( 120 147 mock_redis: AsyncMock, mock_fetch: AsyncMock 121 148 ) -> None: 122 - """warm_follows_cache always fetches and writes to redis.""" 149 + """warm_follows_cache always fetches and writes data + timestamp to redis.""" 123 150 with ( 124 151 patch( 125 152 "backend._internal.follow_graph.get_async_redis_client", ··· 130 157 mock_fetch, 131 158 ), 132 159 ): 133 - await warm_follows_cache("did:plc:test") 160 + await warm_follows_cache(TEST_DID) 161 + 162 + mock_fetch.assert_awaited_once_with(TEST_DID) 163 + 164 + # data + timestamp = 2 set calls 165 + assert mock_redis.set.await_count == 2 166 + data_call = mock_redis.set.call_args_list[0] 167 + assert data_call[0][0] == f"{FOLLOWS_CACHE_PREFIX}{TEST_DID}" 168 + assert data_call[1]["ex"] == FOLLOWS_CACHE_TTL_SECONDS 169 + 170 + 171 + # --- stale-while-revalidate tests --- 172 + 173 + 174 + async def test_stale_cache_triggers_revalidation(mock_redis: AsyncMock) -> None: 175 + """cache >8min old returns data immediately and schedules a background re-warm.""" 176 + stale_ts = str(time.time() - FOLLOWS_STALE_AFTER_SECONDS - 10) 177 + mock_redis.get.side_effect = _make_redis_getter( 178 + cache_data=_serialize_follows(SAMPLE_FOLLOWS), 179 + timestamp=stale_ts, 180 + ) 181 + 182 + with ( 183 + patch( 184 + 
"backend._internal.follow_graph.get_async_redis_client", 185 + return_value=mock_redis, 186 + ), 187 + patch( 188 + "backend._internal.follow_graph._fetch_follows_from_bsky", 189 + ) as mock_bsky, 190 + patch( 191 + "backend._internal.tasks.schedule_follow_graph_warm", 192 + ) as mock_schedule, 193 + ): 194 + result = await get_follows(TEST_DID) 195 + 196 + # returns cached data without live fetch 197 + assert result == SAMPLE_FOLLOWS 198 + mock_bsky.assert_not_called() 199 + 200 + # acquired revalidation lock and scheduled re-warm 201 + mock_redis.set.assert_awaited_once_with( 202 + f"{FOLLOWS_REVALIDATING_PREFIX}{TEST_DID}", "1", nx=True, ex=60 203 + ) 204 + mock_schedule.assert_awaited_once_with(TEST_DID) 205 + 206 + 207 + async def test_fresh_cache_does_not_revalidate(mock_redis: AsyncMock) -> None: 208 + """cache <8min old does not trigger any background re-warm.""" 209 + fresh_ts = str(time.time() - 60) # 1 minute old 210 + mock_redis.get.side_effect = _make_redis_getter( 211 + cache_data=_serialize_follows(SAMPLE_FOLLOWS), 212 + timestamp=fresh_ts, 213 + ) 214 + 215 + with ( 216 + patch( 217 + "backend._internal.follow_graph.get_async_redis_client", 218 + return_value=mock_redis, 219 + ), 220 + patch( 221 + "backend._internal.follow_graph._fetch_follows_from_bsky", 222 + ) as mock_bsky, 223 + patch( 224 + "backend._internal.tasks.schedule_follow_graph_warm", 225 + ) as mock_schedule, 226 + ): 227 + result = await get_follows(TEST_DID) 228 + 229 + assert result == SAMPLE_FOLLOWS 230 + mock_bsky.assert_not_called() 231 + 232 + # no revalidation lock acquired, no re-warm scheduled 233 + mock_redis.set.assert_not_awaited() 234 + mock_schedule.assert_not_awaited() 235 + 236 + 237 + async def test_concurrent_revalidation_deduped(mock_redis: AsyncMock) -> None: 238 + """two stale requests only trigger one re-warm (SET NX dedup).""" 239 + stale_ts = str(time.time() - FOLLOWS_STALE_AFTER_SECONDS - 10) 240 + mock_redis.get.side_effect = _make_redis_getter( 241 + 
cache_data=_serialize_follows(SAMPLE_FOLLOWS), 242 + timestamp=stale_ts, 243 + ) 244 + 245 + # first call acquires lock, second is rejected 246 + mock_redis.set.side_effect = [True, False] 247 + 248 + with ( 249 + patch( 250 + "backend._internal.follow_graph.get_async_redis_client", 251 + return_value=mock_redis, 252 + ), 253 + patch( 254 + "backend._internal.follow_graph._fetch_follows_from_bsky", 255 + ) as mock_bsky, 256 + patch( 257 + "backend._internal.tasks.schedule_follow_graph_warm", 258 + ) as mock_schedule, 259 + ): 260 + result1 = await get_follows(TEST_DID) 261 + result2 = await get_follows(TEST_DID) 134 262 135 - mock_fetch.assert_awaited_once_with("did:plc:test") 136 - mock_redis.set.assert_awaited_once() 137 - call_args = mock_redis.set.call_args 138 - assert call_args[0][0] == f"{FOLLOWS_CACHE_PREFIX}did:plc:test" 139 - assert call_args[1]["ex"] == FOLLOWS_CACHE_TTL_SECONDS 263 + assert result1 == SAMPLE_FOLLOWS 264 + assert result2 == SAMPLE_FOLLOWS 265 + mock_bsky.assert_not_called() 266 + 267 + # only one schedule call despite two stale reads 268 + mock_schedule.assert_awaited_once_with(TEST_DID)
-1
backend/tests/api/test_list_record_sync.py
··· 530 530 patch( 531 531 "backend.api.auth.schedule_atproto_sync", new_callable=AsyncMock 532 532 ) as mock_schedule_sync, 533 - patch("backend.api.auth.schedule_follow_graph_warm", new_callable=AsyncMock), 534 533 ): 535 534 async with AsyncClient( 536 535 transport=ASGITransport(app=test_app), base_url="http://test"
+47
backend/tests/api/test_tracks_cache.py
··· 1 + """tests for anonymous discovery feed caching.""" 2 + 3 + from unittest.mock import AsyncMock, patch 4 + 5 + from redis.exceptions import ConnectionError as RedisConnectionError 6 + 7 + from backend.api.tracks.constants import DISCOVERY_CACHE_KEY 8 + from backend.api.tracks.listing import ( 9 + TracksListResponse, 10 + invalidate_tracks_discovery_cache, 11 + ) 12 + 13 + SAMPLE_RESPONSE = TracksListResponse(tracks=[], next_cursor=None, has_more=False) 14 + 15 + 16 + async def test_anonymous_discovery_cache_hit() -> None: 17 + """cached response deserializes correctly from Redis.""" 18 + cached_json = SAMPLE_RESPONSE.model_dump_json() 19 + result = TracksListResponse.model_validate_json(cached_json) 20 + assert result == SAMPLE_RESPONSE 21 + 22 + 23 + async def test_invalidate_clears_cache() -> None: 24 + """invalidate_tracks_discovery_cache deletes the cache key.""" 25 + mock_redis = AsyncMock() 26 + mock_redis.delete = AsyncMock() 27 + 28 + with patch( 29 + "backend.api.tracks.listing.get_async_redis_client", 30 + return_value=mock_redis, 31 + ): 32 + await invalidate_tracks_discovery_cache() 33 + 34 + mock_redis.delete.assert_awaited_once_with(DISCOVERY_CACHE_KEY) 35 + 36 + 37 + async def test_invalidate_handles_redis_error() -> None: 38 + """invalidation silently handles Redis errors.""" 39 + mock_redis = AsyncMock() 40 + mock_redis.delete.side_effect = RedisConnectionError("redis down") 41 + 42 + with patch( 43 + "backend.api.tracks.listing.get_async_redis_client", 44 + return_value=mock_redis, 45 + ): 46 + # should not raise 47 + await invalidate_tracks_discovery_cache()
+1 -1
loq.toml
··· 39 39 40 40 [[rules]] 41 41 path = "backend/src/backend/api/tracks/listing.py" 42 - max_lines = 523 42 + max_lines = 562 43 43 44 44 [[rules]] 45 45 path = "backend/src/backend/api/tracks/mutations.py"