···11+// Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.
22+33+package tangled
44+55+// schema: sh.tangled.repo.tag
66+77+import (
88+ "bytes"
99+ "context"
1010+1111+ "github.com/bluesky-social/indigo/lex/util"
1212+)
1313+1414+const (
1515+ RepoTagNSID = "sh.tangled.repo.tag"
1616+)
1717+1818+// RepoTag calls the XRPC method "sh.tangled.repo.tag".
1919+//
2020+// repo: Repository identifier in format 'did:plc:.../repoName'
2121+// tag: Name of tag, such as v1.3.0
2222+func RepoTag(ctx context.Context, c util.LexClient, repo string, tag string) ([]byte, error) {
2323+ buf := new(bytes.Buffer)
2424+2525+ params := map[string]interface{}{}
2626+ params["repo"] = repo
2727+ params["tag"] = tag
2828+ if err := c.LexDo(ctx, util.Query, "", "sh.tangled.repo.tag", params, nil, buf); err != nil {
2929+ return nil, err
3030+ }
3131+3232+ return buf.Bytes(), nil
3333+}
+14-2
api/tangled/repotree.go
···16161717// RepoTree_LastCommit is a "lastCommit" in the sh.tangled.repo.tree schema.
1818type RepoTree_LastCommit struct {
1919+ Author *RepoTree_Signature `json:"author,omitempty" cborgen:"author,omitempty"`
1920 // hash: Commit hash
2021 Hash string `json:"hash" cborgen:"hash"`
2122 // message: Commit message
···2728// RepoTree_Output is the output of a sh.tangled.repo.tree call.
2829type RepoTree_Output struct {
2930 // dotdot: Parent directory path
3030- Dotdot *string `json:"dotdot,omitempty" cborgen:"dotdot,omitempty"`
3131- Files []*RepoTree_TreeEntry `json:"files" cborgen:"files"`
3131+ Dotdot *string `json:"dotdot,omitempty" cborgen:"dotdot,omitempty"`
3232+ Files []*RepoTree_TreeEntry `json:"files" cborgen:"files"`
3333+ LastCommit *RepoTree_LastCommit `json:"lastCommit,omitempty" cborgen:"lastCommit,omitempty"`
3234 // parent: The parent path in the tree
3335 Parent *string `json:"parent,omitempty" cborgen:"parent,omitempty"`
3436 // readme: Readme for this file tree
···4345 Contents string `json:"contents" cborgen:"contents"`
4446 // filename: Name of the readme file
4547 Filename string `json:"filename" cborgen:"filename"`
4848+}
// RepoTree_Signature is a "signature" in the sh.tangled.repo.tree schema.
// It carries a commit author's identity and timestamp, as referenced by
// RepoTree_LastCommit's "author" field.
type RepoTree_Signature struct {
	// email: Author email
	Email string `json:"email" cborgen:"email"`
	// name: Author name
	Name string `json:"name" cborgen:"name"`
	// when: Author timestamp (string as transported; format not enforced here)
	When string `json:"when" cborgen:"when"`
}
47594860// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
+194
appview/db/db.go
···33import (
44 "context"
55 "database/sql"
66+ "fmt"
67 "log/slog"
78 "strings"
89···568569 unique (from_at, to_at)
569570 );
570571572572+ create table if not exists webhooks (
573573+ id integer primary key autoincrement,
574574+ repo_at text not null,
575575+ url text not null,
576576+ secret text,
577577+ active integer not null default 1,
578578+ events text not null, -- comma-separated list of events
579579+ created_at text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
580580+ updated_at text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
581581+582582+ foreign key (repo_at) references repos(at_uri) on delete cascade
583583+ );
584584+585585+ create table if not exists webhook_deliveries (
586586+ id integer primary key autoincrement,
587587+ webhook_id integer not null,
588588+ event text not null,
589589+ delivery_id text not null,
590590+ url text not null,
591591+ request_body text not null,
592592+ response_code integer,
593593+ response_body text,
594594+ success integer not null default 0,
595595+ created_at text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
596596+597597+ foreign key (webhook_id) references webhooks(id) on delete cascade
598598+ );
599599+571600 create table if not exists migrations (
572601 id integer primary key autoincrement,
573602 name text unique
574603 );
575604605605+ create table if not exists punchcard_preferences (
606606+ id integer primary key autoincrement,
607607+ user_did text not null unique,
608608+ hide_mine integer default 0,
609609+ hide_others integer default 0
610610+ );
611611+576612 -- indexes for better performance
577613 create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
578614 create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
579615 create index if not exists idx_references_from_at on reference_links(from_at);
580616 create index if not exists idx_references_to_at on reference_links(to_at);
617617+ create index if not exists idx_webhooks_repo_at on webhooks(repo_at);
618618+ create index if not exists idx_webhook_deliveries_webhook_id on webhook_deliveries(webhook_id);
581619 `)
582620 if err != nil {
583621 return nil, err
···11791217 alter table profile add column avatar text;
11801218 `)
11811219 return err
12201220+ })
12211221+12221222+ orm.RunMigration(conn, logger, "remove-profile-stats-column-constraint", func(tx *sql.Tx) error {
12231223+ _, err := tx.Exec(`
12241224+ -- create new table without the check constraint
12251225+ create table profile_stats_new (
12261226+ id integer primary key autoincrement,
12271227+ did text not null,
12281228+ kind text not null, -- no constraint this time
12291229+ foreign key (did) references profile(did) on delete cascade
12301230+ );
12311231+12321232+ -- copy data from old table
12331233+ insert into profile_stats_new (id, did, kind)
12341234+ select id, did, kind
12351235+ from profile_stats;
12361236+12371237+ -- drop old table
12381238+ drop table profile_stats;
12391239+12401240+ -- rename new table
12411241+ alter table profile_stats_new rename to profile_stats;
12421242+ `)
12431243+ return err
12441244+ })
12451245+12461246+ // we cannot modify user-owned record on repository delete
12471247+ orm.RunMigration(conn, logger, "remove-foreign-key-profile_pinned_repositories-and-repos", func(tx *sql.Tx) error {
12481248+ _, err := tx.Exec(`
12491249+ create table profile_pinned_repositories_new (
12501250+ did text not null,
12511251+12521252+ -- data
12531253+ at_uri text not null,
12541254+12551255+ -- constraints
12561256+ unique(did, at_uri),
12571257+ foreign key (did) references profile(did) on delete cascade
12581258+ );
12591259+12601260+ insert into profile_pinned_repositories_new (did, at_uri)
12611261+ select did, at_uri from profile_pinned_repositories;
12621262+12631263+ drop table profile_pinned_repositories;
12641264+ alter table profile_pinned_repositories_new rename to profile_pinned_repositories;
12651265+ `)
12661266+ return err
12671267+ })
12681268+12691269+ // several changes here
12701270+ // 1. remove autoincrement id for these tables
12711271+ // 2. remove unique constraints other than (did, rkey) to handle non-unique atproto records
12721272+ // 3. add generated at_uri field
12731273+ //
12741274+ // see comments below and commit message for details
12751275+ orm.RunMigration(conn, logger, "flexible-stars-reactions-follows-public_keys", func(tx *sql.Tx) error {
12761276+ // - add at_uri
12771277+ // - remove unique constraint (did, subject_at)
12781278+ if _, err := tx.Exec(`
12791279+ create table stars_new (
12801280+ did text not null,
12811281+ rkey text not null,
12821282+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.feed.star' || '/' || rkey) stored,
12831283+12841284+ subject_at text not null,
12851285+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
12861286+12871287+ unique(did, rkey)
12881288+ );
12891289+12901290+ insert into stars_new (did, rkey, subject_at, created)
12911291+ select did, rkey, subject_at, created from stars;
12921292+12931293+ drop table stars;
12941294+ alter table stars_new rename to stars;
12951295+ `); err != nil {
12961296+ return fmt.Errorf("migrating stars: %w", err)
12971297+ }
12981298+12991299+ // - add at_uri
13001300+ // - reacted_by_did -> did
13011301+ // - thread_at -> subject_at
13021302+ // - remove unique constraint
13031303+ if _, err := tx.Exec(`
13041304+ create table reactions_new (
13051305+ did text not null,
13061306+ rkey text not null,
13071307+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.feed.reaction' || '/' || rkey) stored,
13081308+13091309+ subject_at text not null,
13101310+ kind text not null,
13111311+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
13121312+13131313+ unique(did, rkey)
13141314+ );
13151315+13161316+ insert into reactions_new (did, rkey, subject_at, kind, created)
13171317+ select reacted_by_did, rkey, thread_at, kind, created from reactions;
13181318+13191319+ drop table reactions;
13201320+ alter table reactions_new rename to reactions;
13211321+ `); err != nil {
13221322+ return fmt.Errorf("migrating reactions: %w", err)
13231323+ }
13241324+13251325+ // - add at_uri column
13261326+ // - user_did -> did
13271327+ // - followed_at -> created
13281328+ // - remove unique constraint
13291329+ // - remove check constraint
13301330+ if _, err := tx.Exec(`
13311331+ create table follows_new (
13321332+ did text not null,
13331333+ rkey text not null,
13341334+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.graph.follow' || '/' || rkey) stored,
13351335+13361336+ subject_did text not null,
13371337+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
13381338+13391339+ unique(did, rkey)
13401340+ );
13411341+13421342+ insert into follows_new (did, rkey, subject_did, created)
13431343+ select user_did, rkey, subject_did, followed_at from follows;
13441344+13451345+ drop table follows;
13461346+ alter table follows_new rename to follows;
13471347+ `); err != nil {
13481348+ return fmt.Errorf("migrating follows: %w", err)
13491349+ }
13501350+13511351+ // - add at_uri column
13521352+ // - remove foreign key relationship from repos
13531353+ if _, err := tx.Exec(`
13541354+ create table public_keys_new (
13551355+ did text not null,
13561356+ rkey text not null,
13571357+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.publicKey' || '/' || rkey) stored,
13581358+13591359+ name text not null,
13601360+ key text not null,
13611361+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
13621362+13631363+ unique(did, rkey)
13641364+ );
13651365+13661366+ insert into public_keys_new (did, rkey, name, key, created)
13671367+ select did, rkey, name, key, created from public_keys;
13681368+13691369+ drop table public_keys;
13701370+ alter table public_keys_new rename to public_keys;
13711371+ `); err != nil {
13721372+ return fmt.Errorf("migrating public_keys: %w", err)
13731373+ }
13741374+13751375+ return nil
11821376 })
1183137711841378 return &DB{
+41-35
appview/db/follow.go
···66 "strings"
77 "time"
8899+ "github.com/bluesky-social/indigo/atproto/syntax"
910 "tangled.org/core/appview/models"
1011 "tangled.org/core/orm"
1112)
12131313-func AddFollow(e Execer, follow *models.Follow) error {
1414- query := `insert or ignore into follows (user_did, subject_did, rkey) values (?, ?, ?)`
1515- _, err := e.Exec(query, follow.UserDid, follow.SubjectDid, follow.Rkey)
1414+func UpsertFollow(e Execer, follow models.Follow) error {
1515+ _, err := e.Exec(
1616+ `insert into follows (did, rkey, subject_did, created)
1717+ values (?, ?, ?, ?)
1818+ on conflict(did, rkey) do update set
1919+ subject_did = excluded.subject_did,
2020+ created = excluded.created`,
2121+ follow.UserDid,
2222+ follow.Rkey,
2323+ follow.SubjectDid,
2424+ follow.FollowedAt.Format(time.RFC3339),
2525+ )
1626 return err
1727}
18281919-// Get a follow record
2020-func GetFollow(e Execer, userDid, subjectDid string) (*models.Follow, error) {
2121- query := `select user_did, subject_did, followed_at, rkey from follows where user_did = ? and subject_did = ?`
2222- row := e.QueryRow(query, userDid, subjectDid)
2323-2424- var follow models.Follow
2525- var followedAt string
2626- err := row.Scan(&follow.UserDid, &follow.SubjectDid, &followedAt, &follow.Rkey)
2929+// Remove a follow
3030+func DeleteFollow(e Execer, did, subjectDid syntax.DID) ([]syntax.ATURI, error) {
3131+ var deleted []syntax.ATURI
3232+ rows, err := e.Query(
3333+ `delete from follows
3434+ where did = ? and subject_did = ?
3535+ returning at_uri`,
3636+ did,
3737+ subjectDid,
3838+ )
2739 if err != nil {
2828- return nil, err
4040+ return nil, fmt.Errorf("deleting stars: %w", err)
2941 }
4242+ defer rows.Close()
30433131- followedAtTime, err := time.Parse(time.RFC3339, followedAt)
3232- if err != nil {
3333- log.Println("unable to determine followed at time")
3434- follow.FollowedAt = time.Now()
3535- } else {
3636- follow.FollowedAt = followedAtTime
4444+ for rows.Next() {
4545+ var aturi syntax.ATURI
4646+ if err := rows.Scan(&aturi); err != nil {
4747+ return nil, fmt.Errorf("scanning at_uri: %w", err)
4848+ }
4949+ deleted = append(deleted, aturi)
3750 }
3838-3939- return &follow, nil
4040-}
4141-4242-// Remove a follow
4343-func DeleteFollow(e Execer, userDid, subjectDid string) error {
4444- _, err := e.Exec(`delete from follows where user_did = ? and subject_did = ?`, userDid, subjectDid)
4545- return err
5151+ return deleted, nil
4652}

// DeleteFollowByRkey removes a single follow record identified by the
// author's did and the record key (rkey), matching how atproto record
// deletions are addressed.
func DeleteFollowByRkey(e Execer, userDid, rkey string) error {
	_, err := e.Exec(`delete from follows where did = ? and rkey = ?`, userDid, rkey)
	return err
}
5359···5662 err := e.QueryRow(
5763 `SELECT
5864 COUNT(CASE WHEN subject_did = ? THEN 1 END) AS followers,
5959- COUNT(CASE WHEN user_did = ? THEN 1 END) AS following
6565+ COUNT(CASE WHEN did = ? THEN 1 END) AS following
6066 FROM follows;`, did, did).Scan(&followers, &following)
6167 if err != nil {
6268 return models.FollowStats{}, err
···96102 group by subject_did
97103 ) f
98104 full outer join (
9999- select user_did as did, count(*) as following
105105+ select did as did, count(*) as following
100106 from follows
101101- where user_did in (%s)
102102- group by user_did
107107+ where did in (%s)
108108+ group by did
103109 ) g on f.did = g.did`,
104110 placeholderStr, placeholderStr)
105111···156162 }
157163158164 query := fmt.Sprintf(
159159- `select user_did, subject_did, followed_at, rkey
165165+ `select did, subject_did, created, rkey
160166 from follows
161167 %s
162162- order by followed_at desc
168168+ order by created desc
163169 %s
164170 `, whereClause, limitClause)
165171···198204}
199205200206func GetFollowing(e Execer, did string) ([]models.Follow, error) {
201201- return GetFollows(e, 0, orm.FilterEq("user_did", did))
207207+ return GetFollows(e, 0, orm.FilterEq("did", did))
202208}
203209204210func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
···239245 query := fmt.Sprintf(`
240246 SELECT subject_did
241247 FROM follows
242242- WHERE user_did = ? AND subject_did IN (%s)
248248+ WHERE did = ? AND subject_did IN (%s)
243249 `, strings.Join(placeholders, ","))
244250245251 rows, err := e.Query(query, args...)
+48-1
appview/db/pipeline.go
···170170171171// this is a mega query, but the most useful one:
172172// get N pipelines, for each one get the latest status of its N workflows
173173+//
174174+// the pipelines table is aliased to `p`
175175+// the triggers table is aliased to `t`
173176func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) {
174177 var conditions []string
175178 var args []any
176179 for _, filter := range filters {
177177- filter.Key = "p." + filter.Key // the table is aliased in the query to `p`
178180 conditions = append(conditions, filter.Condition())
179181 args = append(args, filter.Arg()...)
180182 }
···366368367369 return all, nil
368370}
371371+372372+// the pipelines table is aliased to `p`
373373+// the triggers table is aliased to `t`
374374+func GetTotalPipelineStatuses(e Execer, filters ...orm.Filter) (int64, error) {
375375+ var conditions []string
376376+ var args []any
377377+ for _, filter := range filters {
378378+ conditions = append(conditions, filter.Condition())
379379+ args = append(args, filter.Arg()...)
380380+ }
381381+382382+ whereClause := ""
383383+ if conditions != nil {
384384+ whereClause = " where " + strings.Join(conditions, " and ")
385385+ }
386386+387387+ query := fmt.Sprintf(`
388388+ select
389389+ count(1)
390390+ from
391391+ pipelines p
392392+ join
393393+ triggers t ON p.trigger_id = t.id
394394+ %s
395395+ `, whereClause)
396396+397397+ rows, err := e.Query(query, args...)
398398+ if err != nil {
399399+ return 0, err
400400+ }
401401+ defer rows.Close()
402402+403403+ for rows.Next() {
404404+ var count int64
405405+ err := rows.Scan(&count)
406406+ if err != nil {
407407+ return 0, err
408408+ }
409409+410410+ return count, nil
411411+ }
412412+413413+ // unreachable
414414+ return 0, nil
415415+}
···110110 type="text"
111111 id="description"
112112 name="description"
113113+ maxlength="140"
113114 class="w-full w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 border border-gray-300 rounded px-3 py-2"
114115 placeholder="A brief description of your project..."
115116 />
116117 <p class="text-sm text-gray-500 dark:text-gray-400 mt-1">
117117- Optional. A short description to help others understand what your project does.
118118+ Optional. A short description to help others understand what your project does (max 140 characters).
118119 </p>
119120 </div>
120121{{ end }}
···11+# how to setup local appview dev environment
# how to set up a local appview dev environment

The appview depends on several microservices — from knot and spindle through the rest of the atproto infrastructure. This test environment runs all of them inside a NixOS VM.
44+55+1. copy `contrib/example.env` to `.env`, fill it and source it
66+2. run vm
77+ ```bash
88+ nix run --impure .#vm
99+ ```
1010+3. trust the generated cert from host machine
1111+ ```bash
1212+ # for macos
1313+ sudo security add-trusted-cert -d -r trustRoot \
1414+ -k /Library/Keychains/System.keychain \
1515+ ./nix/vm-data/caddy/.local/share/caddy/pki/authorities/local/root.crt
1616+ ```
1717+4. create test accounts with valid emails (use [`create-test-account.sh`](./scripts/create-test-account.sh))
1818+5. create default labels (use [`setup-const-records`](./scripts/setup-const-records.sh))
1919+6. restart vm with correct owner-did
2020+2121+for git-https, you should change your local git config:
2222+```
2323+[http "https://knot.tngl.boltless.dev"]
    sslCAPath = <path-to-this-repo>/nix/vm-data/caddy/.local/share/caddy/pki/authorities/local/
2525+```
+68
contrib/scripts/create-test-account.sh
#!/bin/bash
# Create a throwaway test account on the local PDS.
#
# Usage: create-test-account.sh [USERNAME]
# Requires PDS_HOSTNAME and PDS_ADMIN_PASSWORD, loaded from ../pds.env.
set -o errexit
set -o nounset
set -o pipefail

source "$(dirname "$0")/../pds.env"

# PDS_HOSTNAME=
# PDS_ADMIN_PASSWORD=

# curl a URL and fail if the request fails.
function curl_cmd_get {
	curl --fail --silent --show-error "$@"
}

# curl a URL and fail if the request fails.
function curl_cmd_post {
	curl --fail --silent --show-error --request POST --header "Content-Type: application/json" "$@"
}

# curl a URL but do not fail if the request fails.
function curl_cmd_post_nofail {
	curl --silent --show-error --request POST --header "Content-Type: application/json" "$@"
}

USERNAME="${1:-}"

if [[ "${USERNAME}" == "" ]]; then
	# -r: don't mangle backslashes in the typed name
	read -r -p "Enter a username: " USERNAME
fi

if [[ "${USERNAME}" == "" ]]; then
	echo "ERROR: missing USERNAME parameter." >&2
	# NOTE: previously referenced ${SUBCOMMAND}, which is never set and
	# aborts here under `set -o nounset` before the usage line prints.
	echo "Usage: $0 <USERNAME>" >&2
	exit 1
fi

EMAIL="${USERNAME}@${PDS_HOSTNAME}"

PASSWORD="password"
INVITE_CODE="$(curl_cmd_post \
	--user "admin:${PDS_ADMIN_PASSWORD}" \
	--data '{"useCount": 1}' \
	"https://${PDS_HOSTNAME}/xrpc/com.atproto.server.createInviteCode" | jq --raw-output '.code'
)"
RESULT="$(curl_cmd_post_nofail \
	--data "{\"email\":\"${EMAIL}\", \"handle\":\"${USERNAME}.${PDS_HOSTNAME}\", \"password\":\"${PASSWORD}\", \"inviteCode\":\"${INVITE_CODE}\"}" \
	"https://${PDS_HOSTNAME}/xrpc/com.atproto.server.createAccount"
)"

# Quote expansions so the JSON payload is not word-split or globbed.
DID="$(echo "${RESULT}" | jq --raw-output '.did')"
if [[ "${DID}" != did:* ]]; then
	ERR="$(echo "${RESULT}" | jq --raw-output '.message')"
	echo "ERROR: ${ERR}" >&2
	# was "Usage: $0 <EMAIL> <HANDLE>" — this script takes a username
	echo "Usage: $0 <USERNAME>" >&2
	exit 1
fi

echo
echo "Account created successfully!"
echo "-----------------------------"
echo "Handle : ${USERNAME}.${PDS_HOSTNAME}"
echo "DID : ${DID}"
echo "Password : ${PASSWORD}"
echo "-----------------------------"
echo "This is a test account with an insecure password."
echo "Make sure it's only used for development."
echo
···33author: The Tangled Contributors
44date: 21 Sun, Dec 2025
55abstract: |
66- Tangled is a decentralized code hosting and collaboration
77- platform. Every component of Tangled is open-source and
88- self-hostable. [tangled.org](https://tangled.org) also
99- provides hosting and CI services that are free to use.
66+ Tangled is a decentralized code hosting and collaboration
77+ platform. Every component of Tangled is open-source and
88+ self-hostable. [tangled.org](https://tangled.org) also
99+ provides hosting and CI services that are free to use.
10101111- There are several models for decentralized code
1212- collaboration platforms, ranging from ActivityPub’s
1313- (Forgejo) federated model, to Radicle’s entirely P2P model.
1414- Our approach attempts to be the best of both worlds by
1515- adopting the AT Protocol—a protocol for building decentralized
1616- social applications with a central identity
1111+ There are several models for decentralized code
1212+ collaboration platforms, ranging from ActivityPub’s
1313+ (Forgejo) federated model, to Radicle’s entirely P2P model.
1414+ Our approach attempts to be the best of both worlds by
1515+ adopting the AT Protocol—a protocol for building decentralized
1616+ social applications with a central identity
17171818- Our approach to this is the idea of “knots”. Knots are
1919- lightweight, headless servers that enable users to host Git
2020- repositories with ease. Knots are designed for either single
2121- or multi-tenant use which is perfect for self-hosting on a
2222- Raspberry Pi at home, or larger “community” servers. By
2323- default, Tangled provides managed knots where you can host
2424- your repositories for free.
1818+ Our approach to this is the idea of “knots”. Knots are
1919+ lightweight, headless servers that enable users to host Git
2020+ repositories with ease. Knots are designed for either single
2121+ or multi-tenant use which is perfect for self-hosting on a
2222+ Raspberry Pi at home, or larger “community” servers. By
2323+ default, Tangled provides managed knots where you can host
2424+ your repositories for free.
25252626- The appview at tangled.org acts as a consolidated "view"
2727- into the whole network, allowing users to access, clone and
2828- contribute to repositories hosted across different knots
2929- seamlessly.
2626+ The appview at tangled.org acts as a consolidated "view"
2727+ into the whole network, allowing users to access, clone and
2828+ contribute to repositories hosted across different knots
2929+ seamlessly.
3030---
31313232# Quick start guide
···131131cd my-project
132132133133git init
134134-echo "# My Project" > README.md
134134+echo "# My Project" > README.md
135135```
136136137137Add some content and push!
···313313and operation tool. For the purpose of this guide, we're
314314only concerned with these subcommands:
315315316316- * `knot server`: the main knot server process, typically
317317- run as a supervised service
318318- * `knot guard`: handles role-based access control for git
319319- over SSH (you'll never have to run this yourself)
320320- * `knot keys`: fetches SSH keys associated with your knot;
321321- we'll use this to generate the SSH
322322- `AuthorizedKeysCommand`
316316+- `knot server`: the main knot server process, typically
317317+ run as a supervised service
318318+- `knot guard`: handles role-based access control for git
319319+ over SSH (you'll never have to run this yourself)
320320+- `knot keys`: fetches SSH keys associated with your knot;
321321+ we'll use this to generate the SSH
322322+ `AuthorizedKeysCommand`
323323324324```
325325cd core
···432432can move these paths if you'd like to store them in another folder. Be careful
433433when adjusting these paths:
434434435435-* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
436436-any possible side effects. Remember to restart it once you're done.
437437-* Make backups before moving in case something goes wrong.
438438-* Make sure the `git` user can read and write from the new paths.
435435+- Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
436436+ any possible side effects. Remember to restart it once you're done.
437437+- Make backups before moving in case something goes wrong.
438438+- Make sure the `git` user can read and write from the new paths.
439439440440#### Database
441441···502502Note that you should add a newline at the end if setting a non-empty message
503503since the knot won't do this for you.
504504505505+## Troubleshooting
506506+507507+If you run your own knot, you may run into some of these
508508+common issues. You can always join the
509509+[IRC](https://web.libera.chat/#tangled) or
510510+[Discord](https://chat.tangled.org/) if this section does
511511+not help.
512512+513513+### Unable to push
514514+515515+If you are unable to push to your knot or repository:
516516+517517+1. First, ensure that you have added your SSH public key to
518518+ your account
519519+2. Check to see that your knot has synced the key by running
520520+ `knot keys`
521521+3. Check to see if git is supplying the correct private key
522522+ when pushing: `GIT_SSH_COMMAND="ssh -v" git push ...`
523523+4. Check to see if `sshd` on the knot is rejecting the push
524524+ for some reason: `journalctl -xeu ssh` (or `sshd`,
525525+ depending on your machine). These logs are unavailable if
526526+ using docker.
527527+5. Check to see if the knot itself is rejecting the push,
528528+ depending on your setup, the logs might be in one of the
529529+ following paths:
530530+ - `/tmp/knotguard.log`
531531+ - `/home/git/log`
532532+ - `/home/git/guard.log`
533533+505534# Spindles
506535507536## Pipelines
···650679key-value map, with the key being the registry to fetch
651680dependencies from, and the value being the list of
652681dependencies to fetch.
682682+683683+The registry URL syntax can be found [on the nix
684684+manual](https://nix.dev/manual/nix/2.18/command-ref/new-cli/nix3-registry-add).
653685654686Say you want to fetch Node.js and Go from `nixpkgs`, and a
655687package called `my_pkg` you've made from your own registry
···818850819851### Prerequisites
820852821821-* Go
822822-* Docker (the only supported backend currently)
853853+- Go
854854+- Docker (the only supported backend currently)
823855824856### Configuration
825857826858Spindle is configured using environment variables. The following environment variables are available:
827859828828-* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
829829-* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
830830-* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
831831-* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
832832-* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
833833-* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
834834-* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
835835-* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
836836-* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
860860+- `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
861861+- `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
862862+- `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
863863+- `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
864864+- `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
865865+- `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
866866+- `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
867867+- `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
868868+- `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
837869838870### Running spindle
839871840840-1. **Set the environment variables.** For example:
872872+1. **Set the environment variables.** For example:
841873842874 ```shell
843875 export SPINDLE_SERVER_HOSTNAME="your-hostname"
···871903872904Spindle is a small CI runner service. Here's a high-level overview of how it operates:
873905874874-* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
875875-[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
876876-* When a new repo record comes through (typically when you add a spindle to a
877877-repo from the settings), spindle then resolves the underlying knot and
878878-subscribes to repo events (see:
879879-[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
880880-* The spindle engine then handles execution of the pipeline, with results and
881881-logs beamed on the spindle event stream over WebSocket
906906+- Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
907907+ [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
908908+- When a new repo record comes through (typically when you add a spindle to a
909909+ repo from the settings), spindle then resolves the underlying knot and
910910+ subscribes to repo events (see:
911911+ [`sh.tangled.pipeline`](/lexicons/pipeline.json)).
912912+- The spindle engine then handles execution of the pipeline, with results and
913913+ logs beamed on the spindle event stream over WebSocket
882914883915### The engine
884916···11921224 secret_id="$(cat /tmp/openbao/secret-id)"
11931225```
1194122612271227+# Webhooks
12281228+12291229+Webhooks allow you to receive HTTP POST notifications when events occur in your repositories. This enables you to integrate Tangled with external services, trigger CI/CD pipelines, send notifications, or automate workflows.
12301230+12311231+## Overview
12321232+12331233+Webhooks send HTTP POST requests to URLs you configure whenever specific events happen. Currently, Tangled supports push events, with more event types coming soon.
12341234+12351235+## Configuring webhooks
12361236+12371237+To set up a webhook for your repository:
12381238+12391239+1. Navigate to your repository settings
12401240+2. Click the "hooks" tab
12411241+3. Click "add webhook"
12421242+4. Configure your webhook:
12431243+ - **Payload URL**: The endpoint that will receive the webhook POST requests
12441244+ - **Secret**: An optional secret key for verifying webhook authenticity (auto-generated if left blank)
12451245+ - **Events**: Select which events trigger the webhook (currently only push events)
12461246+ - **Active**: Toggle whether the webhook is enabled
12471247+12481248+## Webhook payload
12491249+12501250+### Push
12511251+12521252+When a push event occurs, Tangled sends a POST request with a JSON payload of the format:
12531253+12541254+```json
12551255+{
12561256+ "after": "7b320e5cbee2734071e4310c1d9ae401d8f6cab5",
12571257+ "before": "c04ddf64eddc90e4e2a9846ba3b43e67a0e2865e",
12581258+ "pusher": {
12591259+ "did": "did:plc:hwevmowznbiukdf6uk5dwrrq"
12601260+ },
12611261+ "ref": "refs/heads/main",
12621262+ "repository": {
12631263+ "clone_url": "https://tangled.org/did:plc:hwevmowznbiukdf6uk5dwrrq/some-repo",
12641264+ "created_at": "2025-09-15T08:57:23Z",
12651265+ "description": "an example repository",
12661266+ "fork": false,
12671267+ "full_name": "did:plc:hwevmowznbiukdf6uk5dwrrq/some-repo",
12681268+ "html_url": "https://tangled.org/did:plc:hwevmowznbiukdf6uk5dwrrq/some-repo",
12691269+ "name": "some-repo",
12701270+ "open_issues_count": 5,
12711271+ "owner": {
12721272+ "did": "did:plc:hwevmowznbiukdf6uk5dwrrq"
12731273+ },
12741274+ "ssh_url": "ssh://git@tangled.org/did:plc:hwevmowznbiukdf6uk5dwrrq/some-repo",
12751275+ "stars_count": 1,
12761276+ "updated_at": "2025-09-15T08:57:23Z"
12771277+ }
12781278+}
12791279+```
12801280+12811281+## HTTP headers
12821282+12831283+Each webhook request includes the following headers:
12841284+12851285+- `Content-Type: application/json`
12861286+- `User-Agent: Tangled-Hook/<short-sha>` — User agent with short SHA of the commit
12871287+- `X-Tangled-Event: push` — The event type
12881288+- `X-Tangled-Hook-ID: <webhook-id>` — The webhook ID
12891289+- `X-Tangled-Delivery: <uuid>` — Unique delivery ID
12901290+- `X-Tangled-Signature-256: sha256=<hmac>` — HMAC-SHA256 signature (if secret configured)
12911291+12921292+## Verifying webhook signatures
12931293+12941294+If you configured a secret, you should verify the webhook signature to ensure requests are authentic. For example, in Go:
12951295+12961296+```go
12971297+package main
12981298+12991299+import (
13001300+ "crypto/hmac"
13011301+ "crypto/sha256"
13021302+ "encoding/hex"
13031303+ "io"
13041304+ "net/http"
13051305+ "strings"
13061306+)
13071307+13081308+func verifySignature(payload []byte, signatureHeader, secret string) bool {
13091309+ // Remove 'sha256=' prefix from signature header
13101310+ signature := strings.TrimPrefix(signatureHeader, "sha256=")
13111311+13121312+ // Compute expected signature
13131313+ mac := hmac.New(sha256.New, []byte(secret))
13141314+ mac.Write(payload)
13151315+ expected := hex.EncodeToString(mac.Sum(nil))
13161316+13171317+ // Use constant-time comparison to prevent timing attacks
13181318+ return hmac.Equal([]byte(signature), []byte(expected))
13191319+}
13201320+13211321+func webhookHandler(w http.ResponseWriter, r *http.Request) {
13221322+ // Read the request body
13231323+ payload, err := io.ReadAll(r.Body)
13241324+ if err != nil {
13251325+ http.Error(w, "Bad request", http.StatusBadRequest)
13261326+ return
13271327+ }
13281328+13291329+ // Get signature from header
13301330+ signatureHeader := r.Header.Get("X-Tangled-Signature-256")
13311331+13321332+ // Verify signature
13331333+ if signatureHeader != "" && verifySignature(payload, signatureHeader, yourSecret) {
13341334+ // Webhook is authentic, process it
13351335+ processWebhook(payload)
13361336+ w.WriteHeader(http.StatusOK)
13371337+ } else {
13381338+ http.Error(w, "Invalid signature", http.StatusUnauthorized)
13391339+ }
13401340+}
13411341+```
13421342+13431343+## Delivery retries
13441344+13451345+Webhooks are automatically retried on failure:
13461346+13471347+- **3 total attempts** (1 initial + 2 retries)
13481348+- **Exponential backoff** starting at 1 second, max 10 seconds
13491349+- **Retried on**:
13501350+ - Network errors
13511351+ - HTTP 5xx server errors
13521352+- **Not retried on**:
13531353+ - HTTP 4xx client errors (bad request, unauthorized, etc.)
13541354+13551355+### Timeouts
13561356+Webhook requests time out after 30 seconds. If your endpoint needs more time:
13581358+13591359+1. Respond with 200 OK immediately
13601360+2. Process the webhook asynchronously in the background
13611361+13621362+## Example integrations
13631363+13641364+### Discord notifications
13651365+13661366+```javascript
13671367+app.post("/webhook", (req, res) => {
13681368+ const payload = req.body;
13691369+13701370+ fetch("https://discord.com/api/webhooks/...", {
13711371+ method: "POST",
13721372+ headers: { "Content-Type": "application/json" },
13731373+ body: JSON.stringify({
13741374+ content: `New push to ${payload.repository.full_name}`,
13751375+ embeds: [
13761376+ {
13771377+ title: `${payload.pusher.did} pushed to ${payload.ref}`,
13781378+ url: payload.repository.html_url,
13791379+ color: 0x00ff00,
13801380+ },
13811381+ ],
13821382+ }),
13831383+ });
13841384+13851385+ res.status(200).send("OK");
13861386+});
13871387+```
13881388+11951389# Migrating knots and spindles
1196139011971391Sometimes, non-backwards compatible changes are made to the
···13271521<details>
13281522 <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1329152313301330- In order to build Tangled's dev VM on macOS, you will
13311331- first need to set up a Linux Nix builder. The recommended
13321332- way to do so is to run a [`darwin.linux-builder`
13331333- VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
13341334- and to register it in `nix.conf` as a builder for Linux
13351335- with the same architecture as your Mac (`linux-aarch64` if
13361336- you are using Apple Silicon).
15241524+In order to build Tangled's dev VM on macOS, you will
15251525+first need to set up a Linux Nix builder. The recommended
15261526+way to do so is to run a [`darwin.linux-builder`
15271527+VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
15281528+and to register it in `nix.conf` as a builder for Linux
15291529+with the same architecture as your Mac (`linux-aarch64` if
15301530+you are using Apple Silicon).
1337153113381338- > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
13391339- > the Tangled repo so that it doesn't conflict with the other VM. For example,
13401340- > you can do
13411341- >
13421342- > ```shell
13431343- > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
13441344- > ```
13451345- >
13461346- > to store the builder VM in a temporary dir.
13471347- >
13481348- > You should read and follow [all the other intructions][darwin builder vm] to
13491349- > avoid subtle problems.
15321532+> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
15331533+> the Tangled repo so that it doesn't conflict with the other VM. For example,
15341534+> you can do
15351535+>
15361536+> ```shell
15371537+> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
15381538+> ```
15391539+>
15401540+> to store the builder VM in a temporary dir.
15411541+>
15421542+> You should read and follow [all the other instructions][darwin builder vm] to
15431543+> avoid subtle problems.
1350154413511351- Alternatively, you can use any other method to set up a
13521352- Linux machine with Nix installed that you can `sudo ssh`
13531353- into (in other words, root user on your Mac has to be able
13541354- to ssh into the Linux machine without entering a password)
13551355- and that has the same architecture as your Mac. See
13561356- [remote builder
13571357- instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
13581358- for how to register such a builder in `nix.conf`.
15451545+Alternatively, you can use any other method to set up a
15461546+Linux machine with Nix installed that you can `sudo ssh`
15471547+into (in other words, root user on your Mac has to be able
15481548+to ssh into the Linux machine without entering a password)
15491549+and that has the same architecture as your Mac. See
15501550+[remote builder
15511551+instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
15521552+for how to register such a builder in `nix.conf`.
1359155313601360- > WARNING: If you'd like to use
13611361- > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
13621362- > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
13631363- > ssh` works can be tricky. It seems to be [possible with
13641364- > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
15541554+> WARNING: If you'd like to use
15551555+> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
15561556+> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
15571557+> ssh` works can be tricky. It seems to be [possible with
15581558+> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1365155913661560</details>
13671561···1434162814351629We follow a commit style similar to the Go project. Please keep commits:
1436163014371437-* **atomic**: each commit should represent one logical change
14381438-* **descriptive**: the commit message should clearly describe what the
14391439-change does and why it's needed
16311631+- **atomic**: each commit should represent one logical change
16321632+- **descriptive**: the commit message should clearly describe what the
16331633+ change does and why it's needed
1440163414411635### Message format
14421636···14621656knotserver/git/service: improve error checking in upload-pack
14631657```
1464165814651465-14661659### General notes
1467166014681661- PRs get merged "as-is" (fast-forward)—like applying a patch-series
14691469-using `git am`. At present, there is no squashing—so please author
14701470-your commits as they would appear on `master`, following the above
14711471-guidelines.
16621662+ using `git am`. At present, there is no squashing—so please author
16631663+ your commits as they would appear on `master`, following the above
16641664+ guidelines.
14721665- If there is a lot of nesting, for example "appview:
14731473-pages/templates/repo/fragments: ...", these can be truncated down to
14741474-just "appview: repo/fragments: ...". If the change affects a lot of
14751475-subdirectories, you may abbreviate to just the top-level names, e.g.
14761476-"appview: ..." or "knotserver: ...".
16661666+ pages/templates/repo/fragments: ...", these can be truncated down to
16671667+ just "appview: repo/fragments: ...". If the change affects a lot of
16681668+ subdirectories, you may abbreviate to just the top-level names, e.g.
16691669+ "appview: ..." or "knotserver: ...".
14771670- Keep commits lowercased with no trailing period.
14781671- Use the imperative mood in the summary line (e.g., "fix bug" not
14791479-"fixed bug" or "fixes bug").
16721672+ "fixed bug" or "fixes bug").
14801673- Try to keep the summary line under 72 characters, but we aren't too
14811481-fussed about this.
16741674+ fussed about this.
14821675- Follow the same formatting for PR titles if filled manually.
14831676- Don't include unrelated changes in the same commit.
14841677- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
14851485-before submitting if necessary.
16781678+ before submitting if necessary.
1486167914871680## Code formatting
14881681···15611754Refer to the [jujutsu
15621755documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
15631756for more information.
17571757+17581758+# Troubleshooting guide
17591759+17601760+## Login issues
17611761+17621762+Owing to the distributed nature of OAuth on AT Protocol, you
17631763+may run into issues with logging in. If you run a
17641764+self-hosted PDS:
17651765+17661766+- You may need to ensure that your PDS is timesynced using
17671767+ NTP:
17681768+ - Enable the `ntpd` service
17691769+ - Run `ntpd -qg` to synchronize your clock
17701770+- You may need to increase the default request timeout:
17711771+ `NODE_OPTIONS="--network-family-autoselection-attempt-timeout=500"`
17721772+17731773+## Empty punchcard
17741774+17751775+For Tangled to register commits that you make across the
17761776+network, you need to set up one of the following:
17771777+17781778+- The committer email should be a verified email associated
17791779+ to your account. You can add and verify emails on the
17801780+ settings page.
17811781+- Or, the committer email should be set to your account's
17821782+ DID: `git config user.email "did:plc:foobar"`. You can find
17831783+  your account's DID on the settings page.
17841784+17851785+## Commit is not marked as verified
17861786+17871787+Presently, Tangled only supports SSH commit signatures.
17881788+17891789+To sign commits using an SSH key with git:
17901790+17911791+```
17921792+git config --global gpg.format ssh
17931793+git config --global user.signingkey ~/.ssh/tangled-key
17941794+```
17951795+17961796+To sign commits using an SSH key with jj, add this to your
17971797+config:
17981798+17991799+```
18001800+[signing]
18011801+behavior = "own"
18021802+backend = "ssh"
18031803+key = "~/.ssh/tangled-key"
18041804+```
18051805+18061806+## Self-hosted knot issues
18071807+18081808+If you need help troubleshooting a self-hosted knot, check
18091809+out the [knot troubleshooting
18101810+guide](/knot-self-hosting-guide.html#troubleshooting).
···17171818// dependencyStep processes dependencies defined in the workflow.
1919// For dependencies using a custom registry (i.e. not nixpkgs), it collects
2020-// all packages and adds a single 'nix profile install' step to the
2020+// all packages and adds a single 'nix profile add' step to the
2121// beginning of the workflow's step list.
2222func dependencyStep(deps map[string][]string) *Step {
2323 var customPackages []string
···3737 }
38383939 if len(customPackages) > 0 {
4040- installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile install"
4040+ installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile add"
4141 cmd := fmt.Sprintf("%s %s", installCmd, strings.Join(customPackages, " "))
4242 installStep := Step{
4343 command: cmd,