···44 "bytes"
55 "fmt"
66 "io"
77+ "mime/multipart"
88+ "net/http"
7988- "github.com/aws/aws-sdk-go/aws"
99- "github.com/aws/aws-sdk-go/aws/credentials"
1010- "github.com/aws/aws-sdk-go/aws/session"
1111- "github.com/aws/aws-sdk-go/service/s3"
1210 "github.com/haileyok/cocoon/internal/helpers"
1311 "github.com/haileyok/cocoon/models"
1412 "github.com/ipfs/go-cid"
···4240 mime = "application/octet-stream"
4341 }
44424343+ ipfsUpload := s.ipfsConfig != nil && s.ipfsConfig.BlobstoreEnabled
4544 storage := "sqlite"
4646- s3Upload := s.s3Config != nil && s.s3Config.BlobstoreEnabled
4747- if s3Upload {
4848- storage = "s3"
4545+ if ipfsUpload {
4646+ storage = "ipfs"
4947 }
4848+5049 blob := models.Blob{
5150 Did: urepo.Repo.Did,
5251 RefCount: 0,
···6261 read := 0
6362 part := 0
64636565- buf := make([]byte, 0x10000)
6464+ buf := make([]byte, blockSize)
6665 fulldata := new(bytes.Buffer)
67666867 for {
···8079 read += n
8180 fulldata.Write(data)
82818383- if !s3Upload {
8282+ if !ipfsUpload {
8483 blobPart := models.BlobPart{
8584 BlobID: blob.ID,
8685 Idx: part,
···105104 return helpers.ServerError(e, nil)
106105 }
107106108108- if s3Upload {
109109- config := &aws.Config{
110110- Region: aws.String(s.s3Config.Region),
111111- Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""),
112112- }
113113-114114- if s.s3Config.Endpoint != "" {
115115- config.Endpoint = aws.String(s.s3Config.Endpoint)
116116- config.S3ForcePathStyle = aws.Bool(true)
117117- }
118118-119119- sess, err := session.NewSession(config)
107107+ if ipfsUpload {
108108+ ipfsCid, err := s.addBlobToIPFS(fulldata.Bytes(), mime)
120109 if err != nil {
121121- logger.Error("error creating aws session", "error", err)
110110+ logger.Error("error adding blob to ipfs", "error", err)
122111 return helpers.ServerError(e, nil)
123112 }
124113125125- svc := s3.New(sess)
114114+ // Overwrite the locally computed CID with the one returned by the IPFS
115115+ // node so that retrieval via the gateway uses the correct address.
116116+ c = ipfsCid
126117127127- if _, err := svc.PutObject(&s3.PutObjectInput{
128128- Bucket: aws.String(s.s3Config.Bucket),
129129- Key: aws.String(fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())),
130130- Body: bytes.NewReader(fulldata.Bytes()),
131131- }); err != nil {
132132- logger.Error("error uploading blob to s3", "error", err)
133133- return helpers.ServerError(e, nil)
118118+ if s.ipfsConfig.PinningServiceURL != "" {
119119+ if err := s.pinBlobToRemote(ctx, ipfsCid.String(), fmt.Sprintf("blob/%s/%s", urepo.Repo.Did, ipfsCid.String())); err != nil {
120120+ // Non-fatal: the blob is already on the local node; log and
121121+ // continue so the upload does not fail.
122122+ logger.Warn("error pinning blob to remote pinning service", "cid", ipfsCid.String(), "error", err)
123123+ }
134124 }
135125 }
136126137127 if err := s.db.Exec(ctx, "UPDATE blobs SET cid = ? WHERE id = ?", nil, c.Bytes(), blob.ID).Error; err != nil {
138138- // there should probably be somme handling here if this fails...
139128 logger.Error("error updating blob", "error", err)
140129 return helpers.ServerError(e, nil)
141130 }
···148137149138 return e.JSON(200, resp)
150139}
140140+141141+// addBlobToIPFS adds raw blob data to the configured IPFS node via the Kubo
142142+// HTTP RPC API (/api/v0/add) and returns the resulting CID.
143143+func (s *Server) addBlobToIPFS(data []byte, mimeType string) (cid.Cid, error) {
144144+ nodeURL := s.ipfsConfig.NodeURL
145145+ if nodeURL == "" {
146146+ nodeURL = "http://127.0.0.1:5001"
147147+ }
148148+149149+ endpoint := nodeURL + "/api/v0/add?cid-version=1&hash=sha2-256&pin=true&quieter=true"
150150+151151+ body := new(bytes.Buffer)
152152+ writer := multipart.NewWriter(body)
153153+154154+ part, err := writer.CreateFormFile("file", "blob")
155155+ if err != nil {
156156+ return cid.Undef, fmt.Errorf("error creating multipart field: %w", err)
157157+ }
158158+159159+ if _, err := part.Write(data); err != nil {
160160+ return cid.Undef, fmt.Errorf("error writing blob data to multipart: %w", err)
161161+ }
162162+163163+ if err := writer.Close(); err != nil {
164164+ return cid.Undef, fmt.Errorf("error closing multipart writer: %w", err)
165165+ }
166166+167167+ req, err := http.NewRequest(http.MethodPost, endpoint, body)
168168+ if err != nil {
169169+ return cid.Undef, fmt.Errorf("error building ipfs add request: %w", err)
170170+ }
171171+ req.Header.Set("Content-Type", writer.FormDataContentType())
172172+173173+ resp, err := s.http.Do(req)
174174+ if err != nil {
175175+ return cid.Undef, fmt.Errorf("error calling ipfs add: %w", err)
176176+ }
177177+ defer resp.Body.Close()
178178+179179+ if resp.StatusCode != http.StatusOK {
180180+ msg, _ := io.ReadAll(resp.Body)
181181+ return cid.Undef, fmt.Errorf("ipfs add returned status %d: %s", resp.StatusCode, string(msg))
182182+ }
183183+184184+ // The Kubo API with ?quieter=true returns a single JSON line:
185185+ // {"Hash":"<cid>","Size":"<n>"}
186186+ var result struct {
187187+ Hash string `json:"Hash"`
188188+ }
189189+190190+ if err := readJSON(resp.Body, &result); err != nil {
191191+ return cid.Undef, fmt.Errorf("error decoding ipfs add response: %w", err)
192192+ }
193193+194194+ c, err := cid.Parse(result.Hash)
195195+ if err != nil {
196196+ return cid.Undef, fmt.Errorf("error parsing cid from ipfs add response: %w", err)
197197+ }
198198+199199+ return c, nil
200200+}
+52-56
server/handle_sync_get_blob.go
···44 "bytes"
55 "fmt"
66 "io"
77+ "net/http"
7889 "github.com/Azure/go-autorest/autorest/to"
99- "github.com/aws/aws-sdk-go/aws"
1010- "github.com/aws/aws-sdk-go/aws/credentials"
1111- "github.com/aws/aws-sdk-go/aws/session"
1212- "github.com/aws/aws-sdk-go/service/s3"
1310 "github.com/haileyok/cocoon/internal/helpers"
1411 "github.com/haileyok/cocoon/models"
1512 "github.com/ipfs/go-cid"
···56535754 buf := new(bytes.Buffer)
58555959- if blob.Storage == "sqlite" {
5656+ switch blob.Storage {
5757+ case "sqlite":
6058 var parts []models.BlobPart
6159 if err := s.db.Raw(ctx, "SELECT * FROM blob_parts WHERE blob_id = ? ORDER BY idx", nil, blob.ID).Scan(&parts).Error; err != nil {
6260 logger.Error("error getting blob parts", "error", err)
6361 return helpers.ServerError(e, nil)
6462 }
65636666- // TODO: we can just stream this, don't need to make a buffer
6764 for _, p := range parts {
6865 buf.Write(p.Data)
6966 }
7070- } else if blob.Storage == "s3" {
7171- if !(s.s3Config != nil && s.s3Config.BlobstoreEnabled) {
7272- logger.Error("s3 storage disabled")
7373- return helpers.ServerError(e, nil)
7474- }
7575-7676- blobKey := fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())
77677878- if s.s3Config.CDNUrl != "" {
7979- redirectUrl := fmt.Sprintf("%s/%s", s.s3Config.CDNUrl, blobKey)
8080- return e.Redirect(302, redirectUrl)
8181- }
8282-8383- config := &aws.Config{
8484- Region: aws.String(s.s3Config.Region),
8585- Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""),
6868+ case "ipfs":
6969+ if s.ipfsConfig == nil || !s.ipfsConfig.BlobstoreEnabled {
7070+ logger.Error("ipfs storage disabled")
7171+ return helpers.ServerError(e, nil)
8672 }
87738888- if s.s3Config.Endpoint != "" {
8989- config.Endpoint = aws.String(s.s3Config.Endpoint)
9090- config.S3ForcePathStyle = aws.Bool(true)
7474+ // If a public gateway is configured, redirect the client directly to it
7575+ // instead of proxying the content through this server.
7676+ if s.ipfsConfig.GatewayURL != "" {
7777+ redirectURL := fmt.Sprintf("%s/ipfs/%s", s.ipfsConfig.GatewayURL, c.String())
7878+ return e.Redirect(302, redirectURL)
9179 }
92809393- sess, err := session.NewSession(config)
8181+ // Otherwise fetch from the local Kubo node via /api/v0/cat and stream
8282+ // the content back to the client.
8383+ data, err := s.fetchBlobFromIPFS(c.String())
9484 if err != nil {
9595- logger.Error("error creating aws session", "error", err)
8585+ logger.Error("error fetching blob from ipfs node", "cid", c.String(), "error", err)
9686 return helpers.ServerError(e, nil)
9787 }
9898-9999- svc := s3.New(sess)
100100- if result, err := svc.GetObject(&s3.GetObjectInput{
101101- Bucket: aws.String(s.s3Config.Bucket),
102102- Key: aws.String(blobKey),
103103- }); err != nil {
104104- logger.Error("error getting blob from s3", "error", err)
105105- return helpers.ServerError(e, nil)
106106- } else {
107107- read := 0
108108- part := 0
109109- partBuf := make([]byte, 0x10000)
110110-111111- for {
112112- n, err := io.ReadFull(result.Body, partBuf)
113113- if err == io.ErrUnexpectedEOF || err == io.EOF {
114114- if n == 0 {
115115- break
116116- }
117117- } else if err != nil && err != io.ErrUnexpectedEOF {
118118- logger.Error("error reading blob", "error", err)
119119- return helpers.ServerError(e, nil)
120120- }
8888+ buf.Write(data)
12189122122- data := partBuf[:n]
123123- read += n
124124- buf.Write(data)
125125- part++
126126- }
127127- }
128128- } else {
9090+ default:
12991 logger.Error("unknown storage", "storage", blob.Storage)
13092 return helpers.ServerError(e, nil)
13193 }
···1349613597 return e.Stream(200, "application/octet-stream", buf)
13698}
9999+100100+// fetchBlobFromIPFS retrieves blob data for the given CID from the local Kubo
101101+// node using the HTTP RPC API (/api/v0/cat).
102102+func (s *Server) fetchBlobFromIPFS(cidStr string) ([]byte, error) {
103103+ nodeURL := s.ipfsConfig.NodeURL
104104+ if nodeURL == "" {
105105+ nodeURL = "http://127.0.0.1:5001"
106106+ }
107107+108108+ endpoint := fmt.Sprintf("%s/api/v0/cat?arg=%s", nodeURL, cidStr)
109109+110110+ req, err := http.NewRequest(http.MethodPost, endpoint, nil)
111111+ if err != nil {
112112+ return nil, fmt.Errorf("error building ipfs cat request: %w", err)
113113+ }
114114+115115+ resp, err := s.http.Do(req)
116116+ if err != nil {
117117+ return nil, fmt.Errorf("error calling ipfs cat: %w", err)
118118+ }
119119+ defer resp.Body.Close()
120120+121121+ if resp.StatusCode != http.StatusOK {
122122+ msg, _ := io.ReadAll(resp.Body)
123123+ return nil, fmt.Errorf("ipfs cat returned status %d: %s", resp.StatusCode, string(msg))
124124+ }
125125+126126+ data, err := io.ReadAll(resp.Body)
127127+ if err != nil {
128128+ return nil, fmt.Errorf("error reading ipfs cat response: %w", err)
129129+ }
130130+131131+ return data, nil
132132+}
+72
server/ipfs.go
···11+package server
22+33+import (
44+ "bytes"
55+ "context"
66+ "encoding/json"
77+ "fmt"
88+ "io"
99+ "net/http"
1010+ "time"
1111+)
1212+1313+// readJSON decodes a single JSON value from r into dst.
1414+func readJSON(r io.Reader, dst any) error {
1515+ return json.NewDecoder(r).Decode(dst)
1616+}
1717+1818+// pinBlobToRemote pins a CID to the configured remote pinning service using
1919+// the IPFS Pinning Service API spec
2020+// (https://ipfs.github.io/pinning-services-api-spec/).
2121+//
2222+// The call is best-effort: callers should log the error but not treat it as
2323+// fatal so that a transient pinning failure does not prevent a blob upload
2424+// from succeeding.
2525+func (s *Server) pinBlobToRemote(ctx context.Context, cidStr string, name string) error {
2626+ serviceURL := s.ipfsConfig.PinningServiceURL
2727+ token := s.ipfsConfig.PinningServiceToken
2828+2929+ if serviceURL == "" {
3030+ return fmt.Errorf("no pinning service URL configured")
3131+ }
3232+3333+ endpoint := serviceURL + "/pins"
3434+3535+ payload := map[string]any{
3636+ "cid": cidStr,
3737+ "name": name,
3838+ "meta": map[string]string{
3939+ "pinned_by": "cocoon",
4040+ "pinned_at": time.Now().UTC().Format(time.RFC3339),
4141+ },
4242+ }
4343+4444+ body, err := json.Marshal(payload)
4545+ if err != nil {
4646+ return fmt.Errorf("error marshalling pin request: %w", err)
4747+ }
4848+4949+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
5050+ if err != nil {
5151+ return fmt.Errorf("error building pin request: %w", err)
5252+ }
5353+5454+ req.Header.Set("Content-Type", "application/json")
5555+ if token != "" {
5656+ req.Header.Set("Authorization", "Bearer "+token)
5757+ }
5858+5959+ resp, err := s.http.Do(req)
6060+ if err != nil {
6161+ return fmt.Errorf("error calling pinning service: %w", err)
6262+ }
6363+ defer resp.Body.Close()
6464+6565+ // The Pinning Service API returns 202 Accepted on success.
6666+ if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
6767+ msg, _ := io.ReadAll(resp.Body)
6868+ return fmt.Errorf("pinning service returned status %d: %s", resp.StatusCode, string(msg))
6969+ }
7070+7171+ return nil
7272+}
+4-2
server/repo.go
···672672 return nil, err
673673 }
674674675675- // TODO: this does _not_ handle deletions of blobs that are on s3 storage!!!! we need to get the blob, see what
676676- // storage it is in, and clean up s3!!!!
675675+ // TODO: blobs with storage == "ipfs" are not unpinned from the local
676676+ // IPFS node or the remote pinning service when their ref_count reaches
677677+ // zero. A future cleanup pass should call /api/v0/pin/rm on the local
678678+ // node and DELETE /pins/<requestid> on the remote pinning service.
677679 if res.Count == 0 {
678680 if err := rm.db.Exec(ctx, "DELETE FROM blobs WHERE id = ?", nil, res.ID).Error; err != nil {
679681 return nil, err
+37-163
server/server.go
···11package server
2233import (
44- "bytes"
54 "context"
65 "crypto/ecdsa"
76 "embed"
···1716 "text/template"
1817 "time"
19182020- "github.com/aws/aws-sdk-go/aws"
2121- "github.com/aws/aws-sdk-go/aws/credentials"
2222- "github.com/aws/aws-sdk-go/aws/session"
2323- "github.com/aws/aws-sdk-go/service/s3"
2419 "github.com/bluesky-social/indigo/api/atproto"
2520 "github.com/bluesky-social/indigo/atproto/syntax"
2621 "github.com/bluesky-social/indigo/events"
2722 "github.com/bluesky-social/indigo/util"
2823 "github.com/bluesky-social/indigo/xrpc"
2924 "github.com/domodwyer/mailyak/v3"
2525+ "github.com/glebarez/sqlite"
3026 "github.com/go-playground/validator"
3127 "github.com/gorilla/sessions"
3228 "github.com/haileyok/cocoon/identity"
···4440 "github.com/labstack/echo/v4"
4541 "github.com/labstack/echo/v4/middleware"
4642 slogecho "github.com/samber/slog-echo"
4747- "gorm.io/driver/postgres"
4848- "gorm.io/driver/sqlite"
4943 "gorm.io/gorm"
5044)
5145···5347 AccountSessionMaxAge = 30 * 24 * time.Hour // one week
5448)
// IPFSConfig holds configuration for IPFS pinning-based blob storage.
// Blobs are added to an IPFS node via the Kubo HTTP RPC API and optionally
// pinned to a remote pinning service implementing the IPFS Pinning Service
// API spec (e.g. Pinata, web3.storage, Infura).
type IPFSConfig struct {
	// BlobstoreEnabled controls whether blobs are stored on IPFS instead of
	// SQLite.
	BlobstoreEnabled bool

	// NodeURL is the base URL of the Kubo (go-ipfs) RPC API used for adding
	// blobs, e.g. "http://127.0.0.1:5001".
	NodeURL string

	// GatewayURL is the base URL of the IPFS gateway used to serve blobs,
	// e.g. "https://ipfs.io" or a self-hosted gateway. When set, getBlob
	// redirects to this URL instead of fetching the content through the node.
	GatewayURL string

	// PinningServiceURL is the URL of a remote IPFS Pinning Service API
	// endpoint, e.g. "https://api.pinata.cloud/psa". Leave empty to skip
	// remote pinning.
	PinningServiceURL string

	// PinningServiceToken is the Bearer token used to authenticate with the
	// remote pinning service.
	PinningServiceToken string
}
66776778type Server struct {
···8495 lastRequestCrawl time.Time
8596 requestCrawlMu sync.Mutex
86978787- dbName string
8888- dbType string
8989- s3Config *S3Config
9898+ dbName string
9999+ ipfsConfig *IPFSConfig
90100}
9110192102type Args struct {
···95105 LogLevel slog.Level
96106 Addr string
97107 DbName string
9898- DbType string
9999- DatabaseURL string
100108 Version string
101109 Did string
102110 Hostname string
···114122 SmtpEmail string
115123 SmtpName string
116124117117- S3Config *S3Config
125125+ IPFSConfig *IPFSConfig
118126119127 SessionSecret string
120128 SessionCookieKey string
···332340 IdleTimeout: 5 * time.Minute,
333341 }
334342335335- dbType := args.DbType
336336- if dbType == "" {
337337- dbType = "sqlite"
338338- }
339339-340343 var gdb *gorm.DB
341344 var err error
342342- switch dbType {
343343- case "postgres":
344344- if args.DatabaseURL == "" {
345345- return nil, fmt.Errorf("database-url must be set when using postgres")
346346- }
347347- gdb, err = gorm.Open(postgres.Open(args.DatabaseURL), &gorm.Config{})
348348- if err != nil {
349349- return nil, fmt.Errorf("failed to connect to postgres: %w", err)
350350- }
351351- logger.Info("connected to PostgreSQL database")
352352- default:
353353- gdb, err = gorm.Open(sqlite.Open(args.DbName), &gorm.Config{})
354354- if err != nil {
355355- return nil, fmt.Errorf("failed to open sqlite database: %w", err)
356356- }
357357- gdb.Exec("PRAGMA journal_mode=WAL")
358358- gdb.Exec("PRAGMA synchronous=NORMAL")
359359-360360- logger.Info("connected to SQLite database", "path", args.DbName)
345345+ gdb, err = gorm.Open(sqlite.Open(args.DbName), &gorm.Config{})
346346+ if err != nil {
347347+ return nil, fmt.Errorf("failed to open sqlite database: %w", err)
361348 }
349349+ gdb.Exec("PRAGMA journal_mode=WAL")
350350+ gdb.Exec("PRAGMA synchronous=NORMAL")
351351+ logger.Info("connected to SQLite database", "path", args.DbName)
362352 dbw := db.NewDB(gdb)
363353364354 rkbytes, err := os.ReadFile(args.RotationKeyPath)
···437427 evtman: events.NewEventManager(evtPersister),
438428 passport: identity.NewPassport(h, identity.NewMemCache(10_000)),
439429440440- dbName: args.DbName,
441441- dbType: dbType,
442442- s3Config: args.S3Config,
430430+ dbName: args.DbName,
431431+ ipfsConfig: args.IPFSConfig,
443432444433 oauthProvider: provider.NewProvider(provider.Args{
445434 Hostname: args.Hostname,
···611600 }
612601 }()
613602614614- go s.backupRoutine()
615615-616603 go func() {
617604 if err := s.requestCrawl(ctx); err != nil {
618605 logger.Error("error requesting crawls", "err", err)
···653640 s.lastRequestCrawl = time.Now()
654641655642 return nil
656656-}
657657-658658-func (s *Server) doBackup() {
659659- logger := s.logger.With("name", "doBackup")
660660-661661- if s.dbType == "postgres" {
662662- logger.Info("skipping S3 backup - PostgreSQL backups should be handled externally (pg_dump, managed database backups, etc.)")
663663- return
664664- }
665665-666666- start := time.Now()
667667-668668- logger.Info("beginning backup to s3...")
669669-670670- tmpFile := fmt.Sprintf("/tmp/cocoon-backup-%s.db", time.Now().Format(time.RFC3339Nano))
671671- defer os.Remove(tmpFile)
672672-673673- if err := s.db.Client().Exec(fmt.Sprintf("VACUUM INTO '%s'", tmpFile)).Error; err != nil {
674674- logger.Error("error creating tmp backup file", "err", err)
675675- return
676676- }
677677-678678- backupData, err := os.ReadFile(tmpFile)
679679- if err != nil {
680680- logger.Error("error reading tmp backup file", "err", err)
681681- return
682682- }
683683-684684- logger.Info("sending to s3...")
685685-686686- currTime := time.Now().Format("2006-01-02_15-04-05")
687687- key := "cocoon-backup-" + currTime + ".db"
688688-689689- config := &aws.Config{
690690- Region: aws.String(s.s3Config.Region),
691691- Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""),
692692- }
693693-694694- if s.s3Config.Endpoint != "" {
695695- config.Endpoint = aws.String(s.s3Config.Endpoint)
696696- config.S3ForcePathStyle = aws.Bool(true)
697697- }
698698-699699- sess, err := session.NewSession(config)
700700- if err != nil {
701701- logger.Error("error creating s3 session", "err", err)
702702- return
703703- }
704704-705705- svc := s3.New(sess)
706706-707707- if _, err := svc.PutObject(&s3.PutObjectInput{
708708- Bucket: aws.String(s.s3Config.Bucket),
709709- Key: aws.String(key),
710710- Body: bytes.NewReader(backupData),
711711- }); err != nil {
712712- logger.Error("error uploading file to s3", "err", err)
713713- return
714714- }
715715-716716- logger.Info("finished uploading backup to s3", "key", key, "duration", time.Since(start).Seconds())
717717-718718- os.WriteFile("last-backup.txt", []byte(time.Now().Format(time.RFC3339Nano)), 0644)
719719-}
720720-721721-func (s *Server) backupRoutine() {
722722- logger := s.logger.With("name", "backupRoutine")
723723-724724- if s.s3Config == nil || !s.s3Config.BackupsEnabled {
725725- return
726726- }
727727-728728- if s.s3Config.Region == "" {
729729- logger.Warn("no s3 region configured but backups are enabled. backups will not run.")
730730- return
731731- }
732732-733733- if s.s3Config.Bucket == "" {
734734- logger.Warn("no s3 bucket configured but backups are enabled. backups will not run.")
735735- return
736736- }
737737-738738- if s.s3Config.AccessKey == "" {
739739- logger.Warn("no s3 access key configured but backups are enabled. backups will not run.")
740740- return
741741- }
742742-743743- if s.s3Config.SecretKey == "" {
744744- logger.Warn("no s3 secret key configured but backups are enabled. backups will not run.")
745745- return
746746- }
747747-748748- shouldBackupNow := false
749749- lastBackupStr, err := os.ReadFile("last-backup.txt")
750750- if err != nil {
751751- shouldBackupNow = true
752752- } else {
753753- lastBackup, err := time.Parse(time.RFC3339Nano, string(lastBackupStr))
754754- if err != nil {
755755- shouldBackupNow = true
756756- } else if time.Since(lastBackup).Seconds() > 3600 {
757757- shouldBackupNow = true
758758- }
759759- }
760760-761761- if shouldBackupNow {
762762- go s.doBackup()
763763- }
764764-765765- ticker := time.NewTicker(time.Hour)
766766- for range ticker.C {
767767- go s.doBackup()
768768- }
769643}
770644771645func (s *Server) UpdateRepo(ctx context.Context, did string, root cid.Cid, rev string) error {