//! Optimized secp256k1 field arithmetic on 5×52-bit limbs (libsecp256k1-style).
1const std = @import("std");
2const Secp256k1 = std.crypto.ecc.Secp256k1;
3const StdFe = Secp256k1.Fe;
4
5// 5×52-bit unsaturated limb representation (libsecp256k1 style).
6// Each limb holds up to 52 bits of value with 12 bits of headroom in u64.
7// p = 2^256 - 2^32 - 977, so R = 2^256 mod p = 0x1000003D1.
const M: u64 = 0xFFFFFFFFFFFFF; // 52-bit limb mask (2^52 - 1)
const R: u64 = 0x1000003D10; // (2^256 mod p) << 4; the shift compensates for limb 4 being only 48 bits wide (2^260 mod p = 16 * 0x1000003D1)

// secp256k1 prime p in 5×52 limb form: p = (P0, M, M, M, P4), low limb first
const P0: u64 = 0xFFFFEFFFFFC2F; // low 52 bits of p
const P4: u64 = 0xFFFFFFFFFFFF; // top 48 bits of p (limb 4 is only 48 bits wide)
14
15/// Optimized secp256k1 field element: 5 × 52-bit limbs in 64-bit words.
16///
17/// 12 bits of headroom per limb (4 bits in MSW) enables carry-free add/sub
18/// for magnitudes up to ~4096. Mul/sq use libsecp256k1's interleaved
19/// reduction with R = 2^256 mod p, producing fully reduced output.
pub const Fe = struct {
    n: [5]u64,

    /// Additive identity.
    pub const zero: Fe = .{ .n = .{ 0, 0, 0, 0, 0 } };
    /// Multiplicative identity.
    pub const one: Fe = .{ .n = .{ 1, 0, 0, 0, 0 } };

    /// Pack a 32-byte big-endian (or little-endian) value into 5×52-bit limbs.
    /// The value is copied bit-for-bit; it is NOT reduced mod p.
    pub fn fromBytes(b: [32]u8, comptime endian: std.builtin.Endian) Fe {
        const s = if (endian == .big) b else blk: {
            var rev: [32]u8 = undefined;
            for (0..32) |i| rev[i] = b[31 - i];
            break :blk rev;
        };
        // s is big-endian: s[0] is MSB, s[31] is LSB
        // limb 0 = bits 0..51 (least significant)
        // limb 4 = bits 208..255 (most significant, 48 bits)
        return .{ .n = .{
            @as(u64, s[31]) | @as(u64, s[30]) << 8 | @as(u64, s[29]) << 16 |
                @as(u64, s[28]) << 24 | @as(u64, s[27]) << 32 | @as(u64, s[26]) << 40 |
                (@as(u64, s[25]) & 0xF) << 48,
            @as(u64, s[25]) >> 4 | @as(u64, s[24]) << 4 | @as(u64, s[23]) << 12 |
                @as(u64, s[22]) << 20 | @as(u64, s[21]) << 28 | @as(u64, s[20]) << 36 |
                @as(u64, s[19]) << 44,
            @as(u64, s[18]) | @as(u64, s[17]) << 8 | @as(u64, s[16]) << 16 |
                @as(u64, s[15]) << 24 | @as(u64, s[14]) << 32 | @as(u64, s[13]) << 40 |
                (@as(u64, s[12]) & 0xF) << 48,
            @as(u64, s[12]) >> 4 | @as(u64, s[11]) << 4 | @as(u64, s[10]) << 12 |
                @as(u64, s[9]) << 20 | @as(u64, s[8]) << 28 | @as(u64, s[7]) << 36 |
                @as(u64, s[6]) << 44,
            @as(u64, s[5]) | @as(u64, s[4]) << 8 | @as(u64, s[3]) << 16 |
                @as(u64, s[2]) << 24 | @as(u64, s[1]) << 32 | @as(u64, s[0]) << 40,
        } };
    }

    /// Unpack 5×52-bit limbs to a 32-byte value in the requested endianness.
    /// The field element MUST be normalized first (each limb within its width);
    /// otherwise bits above each limb boundary are silently dropped.
    pub fn toBytes(self: Fe, comptime endian: std.builtin.Endian) [32]u8 {
        const n = self.n;
        var b: [32]u8 = undefined;
        // limb 0: bits 0..51
        b[31] = @truncate(n[0]);
        b[30] = @truncate(n[0] >> 8);
        b[29] = @truncate(n[0] >> 16);
        b[28] = @truncate(n[0] >> 24);
        b[27] = @truncate(n[0] >> 32);
        b[26] = @truncate(n[0] >> 40);
        // byte 25 straddles limbs 0 and 1 (4 bits each)
        b[25] = @truncate((n[0] >> 48) | (n[1] << 4));
        // limb 1: bits 52..103
        b[24] = @truncate(n[1] >> 4);
        b[23] = @truncate(n[1] >> 12);
        b[22] = @truncate(n[1] >> 20);
        b[21] = @truncate(n[1] >> 28);
        b[20] = @truncate(n[1] >> 36);
        b[19] = @truncate(n[1] >> 44);
        // limb 2: bits 104..155
        b[18] = @truncate(n[2]);
        b[17] = @truncate(n[2] >> 8);
        b[16] = @truncate(n[2] >> 16);
        b[15] = @truncate(n[2] >> 24);
        b[14] = @truncate(n[2] >> 32);
        b[13] = @truncate(n[2] >> 40);
        // byte 12 straddles limbs 2 and 3 (4 bits each)
        b[12] = @truncate((n[2] >> 48) | (n[3] << 4));
        // limb 3: bits 156..207
        b[11] = @truncate(n[3] >> 4);
        b[10] = @truncate(n[3] >> 12);
        b[9] = @truncate(n[3] >> 20);
        b[8] = @truncate(n[3] >> 28);
        b[7] = @truncate(n[3] >> 36);
        b[6] = @truncate(n[3] >> 44);
        // limb 4: bits 208..255 (48 bits)
        b[5] = @truncate(n[4]);
        b[4] = @truncate(n[4] >> 8);
        b[3] = @truncate(n[4] >> 16);
        b[2] = @truncate(n[4] >> 24);
        b[1] = @truncate(n[4] >> 32);
        b[0] = @truncate(n[4] >> 40);
        if (endian == .little) {
            var rev: [32]u8 = undefined;
            for (0..32) |i| rev[i] = b[31 - i];
            return rev;
        }
        return b;
    }

    /// Carry propagation + full mod-p reduction.
    /// Returns the canonical representation: value in [0, p), each limb
    /// within its nominal width (52 bits, 48 for limb 4).
    ///
    /// Follows libsecp256k1's secp256k1_fe_normalize: fold the overflow of
    /// limb 4 first so the first pass leaves at most a single carry, then OR
    /// that final carry into the "subtract p once more" flag. Checking limb
    /// equality alone would miss values that land in
    /// [2^256, 2^256 + 0x1000003D1) after the first pass — the old code
    /// masked that carry away, producing a result short by 2^32 + 977 mod p.
    ///
    /// NOTE(review): like upstream, this assumes the input magnitude is
    /// bounded (limbs comfortably below 2^64) so the u64 additions below
    /// cannot overflow; inputs produced by add/mul/sq in this file satisfy
    /// this.
    pub fn normalize(self: Fe) Fe {
        var t0 = self.n[0];
        var t1 = self.n[1];
        var t2 = self.n[2];
        var t3 = self.n[3];
        var t4 = self.n[4];

        // Reduce t4's overflow above 48 bits up front: each excess 2^256
        // folds back as 0x1000003D1 (since 2^256 ≡ 2^32 + 977 mod p).
        var x = t4 >> 48;
        t4 &= 0xFFFFFFFFFFFF;

        // first pass: fold overflow into t0 and propagate carries
        t0 += x * 0x1000003D1;
        t1 += t0 >> 52;
        t0 &= M;
        t2 += t1 >> 52;
        t1 &= M;
        t3 += t2 >> 52;
        t2 &= M;
        t4 += t3 >> 52;
        t3 &= M;

        // At most one more reduction is needed. It is required either when
        // the first pass carried out of bit 48 of t4, or when the value is
        // >= p (all upper limbs saturated and t0 >= P0).
        const ge: u64 = if (t4 == 0xFFFFFFFFFFFF and
            t3 == M and t2 == M and t1 == M and
            t0 >= P0) 1 else 0;
        x = (t4 >> 48) | ge;

        // second pass: conditionally add 2^256 - p, propagate, and drop the
        // 2^256 bit — net effect is subtracting p exactly when x == 1.
        t0 += x * 0x1000003D1;
        t1 += t0 >> 52;
        t0 &= M;
        t2 += t1 >> 52;
        t1 &= M;
        t3 += t2 >> 52;
        t2 &= M;
        t4 += t3 >> 52;
        t3 &= M;
        t4 &= 0xFFFFFFFFFFFF;

        return .{ .n = .{ t0, t1, t2, t3, t4 } };
    }

    /// Returns true if the field value equals zero (after normalizing).
    pub fn isZero(self: Fe) bool {
        const f = self.normalize();
        return (f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4]) == 0;
    }

    /// Returns true if two field values are equal mod p (both are normalized
    /// to canonical form before comparing limbs).
    pub fn equivalent(self: Fe, other: Fe) bool {
        const a = self.normalize();
        const b = other.normalize();
        return ((a.n[0] ^ b.n[0]) | (a.n[1] ^ b.n[1]) | (a.n[2] ^ b.n[2]) |
            (a.n[3] ^ b.n[3]) | (a.n[4] ^ b.n[4])) == 0;
    }

    /// Carry-free addition: limbs are summed without propagation, consuming
    /// headroom. The result's magnitude is the sum of the inputs' magnitudes.
    pub fn add(self: Fe, other: Fe) Fe {
        return .{ .n = .{
            self.n[0] + other.n[0],
            self.n[1] + other.n[1],
            self.n[2] + other.n[2],
            self.n[3] + other.n[3],
            self.n[4] + other.n[4],
        } };
    }

    /// Negate: result = (magnitude+1)*p - self, which is ≡ -self mod p.
    /// `magnitude` must be an upper bound on self's magnitude so that each
    /// per-limb subtraction cannot underflow (assumes limbs <= magnitude*M;
    /// holds for values built from normalized inputs via add/dbl here).
    pub fn neg(self: Fe, magnitude: u32) Fe {
        const m: u64 = @as(u64, magnitude) + 1;
        return .{ .n = .{
            m * P0 - self.n[0],
            m * M - self.n[1],
            m * M - self.n[2],
            m * M - self.n[3],
            m * P4 - self.n[4],
        } };
    }

    /// sub = self + (-other); `other_mag` bounds other's magnitude (see neg).
    pub fn subMag(self: Fe, other: Fe, other_mag: u32) Fe {
        return self.add(other.neg(other_mag));
    }

    /// Subtraction with auto-normalize of the subtrahend, so neg(1) is
    /// always a valid magnitude bound.
    pub fn sub(self: Fe, other: Fe) Fe {
        return self.add(other.normalize().neg(1));
    }

    /// Double via carry-free addition (doubles the magnitude).
    pub fn dbl(self: Fe) Fe {
        return self.add(self);
    }

    /// Multiply each limb by a small integer (multiplies the magnitude by
    /// `val`); no carry propagation is performed.
    pub fn mulInt(self: Fe, val: u8) Fe {
        const v: u64 = val;
        return .{ .n = .{
            self.n[0] * v, self.n[1] * v, self.n[2] * v,
            self.n[3] * v, self.n[4] * v,
        } };
    }

    /// 5×52 multiplication with interleaved secp256k1 reduction.
    /// Ported from libsecp256k1's secp256k1_fe_mul_inner: partial products
    /// are accumulated column-by-column in two u128 accumulators (c for low
    /// columns, d for high columns), with high columns folded down via
    /// R = 0x1000003D10 as they complete.
    /// NOTE(review): upstream requires input magnitudes <= 8 so the u128
    /// accumulators cannot overflow — not checked here; confirm at call
    /// sites. Output has magnitude 1 (limb 4 may slightly exceed 48 bits);
    /// use normalize() for the canonical form.
    pub fn mul(self: Fe, other: Fe) Fe {
        const a = self.n;
        const b = other.n;

        // column 3: d = p3
        var d: u128 = @as(u128, a[0]) * b[3] + @as(u128, a[1]) * b[2] +
            @as(u128, a[2]) * b[1] + @as(u128, a[3]) * b[0];

        // column 8: c = p8
        var c: u128 = @as(u128, a[4]) * b[4];

        // reduce p8 into d: d += low64(c) * R, c >>= 64
        d += @as(u128, R) * @as(u64, @truncate(c));
        c >>= 64;

        const t3 = @as(u64, @truncate(d)) & M;
        d >>= 52;

        // column 4: d += p4
        d += @as(u128, a[0]) * b[4] + @as(u128, a[1]) * b[3] +
            @as(u128, a[2]) * b[2] + @as(u128, a[3]) * b[1] + @as(u128, a[4]) * b[0];
        // reduce remaining high half of c (shifted up 12 = 64 - 52 bits)
        d += @as(u128, R << 12) * @as(u64, @truncate(c));

        var t4 = @as(u64, @truncate(d)) & M;
        d >>= 52;
        // split off bits 48..51 of t4; they belong to the next 2^256 wrap
        const tx = t4 >> 48;
        t4 &= (M >> 4);

        // column 0: c = p0
        c = @as(u128, a[0]) * b[0];

        // column 5: d += p5
        d += @as(u128, a[1]) * b[4] + @as(u128, a[2]) * b[3] +
            @as(u128, a[3]) * b[2] + @as(u128, a[4]) * b[1];
        var u_0 = @as(u64, @truncate(d)) & M;
        d >>= 52;

        // fold (u0 || tx) down into column 0 with R >> 4
        u_0 = (u_0 << 4) | tx;
        c += @as(u128, u_0) * (R >> 4);

        const r0 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        // column 1: c += p1
        c += @as(u128, a[0]) * b[1] + @as(u128, a[1]) * b[0];

        // column 6: d += p6
        d += @as(u128, a[2]) * b[4] + @as(u128, a[3]) * b[3] + @as(u128, a[4]) * b[2];
        // reduce low 52 bits of d into c
        c += @as(u128, R) * (@as(u64, @truncate(d)) & M);
        d >>= 52;

        const r1 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        // column 2: c += p2
        c += @as(u128, a[0]) * b[2] + @as(u128, a[1]) * b[1] + @as(u128, a[2]) * b[0];

        // column 7: d += p7
        d += @as(u128, a[3]) * b[4] + @as(u128, a[4]) * b[3];
        // reduce: c += low64(d) * R, d >>= 64
        c += @as(u128, R) * @as(u64, @truncate(d));
        d >>= 64;

        const r2 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        // finalize r3, r4
        c += @as(u128, R << 12) * @as(u64, @truncate(d));
        c += t3;

        const r3 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        const r4 = @as(u64, @truncate(c)) + t4;

        return .{ .n = .{ r0, r1, r2, r3, r4 } };
    }

    /// 5×52 squaring with interleaved secp256k1 reduction.
    /// Ported from libsecp256k1's secp256k1_fe_sqr_inner; identical column
    /// schedule to mul() but with the symmetric cross terms doubled.
    /// NOTE(review): same magnitude precondition as mul() (upstream: <= 8).
    pub fn sq(self: Fe) Fe {
        const a = self.n;

        // column 3: d = p3 = 2*a0*a3 + 2*a1*a2
        var d: u128 = @as(u128, a[0] * 2) * a[3] + @as(u128, a[1] * 2) * a[2];

        // column 8: c = p8 = a4*a4
        var c: u128 = @as(u128, a[4]) * a[4];

        d += @as(u128, R) * @as(u64, @truncate(c));
        c >>= 64;

        const t3 = @as(u64, @truncate(d)) & M;
        d >>= 52;

        // column 4: d += p4 = a0*2*a4 + 2*a1*a3 + a2*a2
        const a4x2 = a[4] * 2;
        d += @as(u128, a[0]) * a4x2 + @as(u128, a[1] * 2) * a[3] + @as(u128, a[2]) * a[2];
        d += @as(u128, R << 12) * @as(u64, @truncate(c));

        var t4 = @as(u64, @truncate(d)) & M;
        d >>= 52;
        const tx = t4 >> 48;
        t4 &= (M >> 4);

        // column 0: c = p0 = a0*a0
        c = @as(u128, a[0]) * a[0];

        // column 5: d += p5 = a1*2*a4 + 2*a2*a3
        d += @as(u128, a[1]) * a4x2 + @as(u128, a[2] * 2) * a[3];
        var u_0 = @as(u64, @truncate(d)) & M;
        d >>= 52;

        u_0 = (u_0 << 4) | tx;
        c += @as(u128, u_0) * (R >> 4);

        const r0 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        // column 1: c += p1 = 2*a0*a1
        const a0x2 = a[0] * 2;
        c += @as(u128, a0x2) * a[1];

        // column 6: d += p6 = a2*2*a4 + a3*a3
        d += @as(u128, a[2]) * a4x2 + @as(u128, a[3]) * a[3];
        c += @as(u128, R) * (@as(u64, @truncate(d)) & M);
        d >>= 52;

        const r1 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        // column 2: c += p2 = 2*a0*a2 + a1*a1
        c += @as(u128, a0x2) * a[2] + @as(u128, a[1]) * a[1];

        // column 7: d += p7 = a3*2*a4
        d += @as(u128, a[3]) * a4x2;
        c += @as(u128, R) * @as(u64, @truncate(d));
        d >>= 64;

        const r2 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        c += @as(u128, R << 12) * @as(u64, @truncate(d));
        c += t3;

        const r3 = @as(u64, @truncate(c)) & M;
        c >>= 52;

        const r4 = @as(u64, @truncate(c)) + t4;

        return .{ .n = .{ r0, r1, r2, r3, r4 } };
    }

    /// Comptime conversion from an integer literal to Fe (big-endian pack).
    pub fn fromInt(comptime val: u256) Fe {
        var b: [32]u8 = undefined;
        std.mem.writeInt(u256, &b, val, .big);
        return fromBytes(b, .big);
    }

    /// Modular inverse via Fermat's little theorem: a^(p-2) mod p,
    /// using libsecp256k1's addition chain for the exponent
    /// (x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223 stages).
    /// Returns zero for zero input (0^(p-2) == 0).
    pub fn invert(self: Fe) Fe {
        const a = self;
        const a2 = a.sq().mul(a);
        const a3 = a2.sq().mul(a);
        const a6 = sqn(a3, 3).mul(a3);
        const a9 = sqn(a6, 3).mul(a3);
        const a11 = sqn(a9, 2).mul(a2);
        const a22 = sqn(a11, 11).mul(a11);
        const a44 = sqn(a22, 22).mul(a22);
        const a88 = sqn(a44, 44).mul(a44);
        const a176 = sqn(a88, 88).mul(a88);
        const a220 = sqn(a176, 44).mul(a44);
        const a223 = sqn(a220, 3).mul(a3);

        var r = sqn(a223, 23).mul(a22);
        r = sqn(r, 5).mul(a);
        r = sqn(r, 3).mul(a2);
        r = sqn(r, 2).mul(a);
        return r;
    }

    /// Square `f` exactly `n` times (helper for the inversion chain).
    fn sqn(f: Fe, comptime n: usize) Fe {
        var r = f;
        for (0..n) |_| r = r.sq();
        return r;
    }

    /// Convert from stdlib Fe to Fe via byte round-trip.
    pub fn fromStdlib(fe: StdFe) Fe {
        return fromBytes(fe.toBytes(.big), .big);
    }

    /// Convert to stdlib Fe via byte round-trip. Normalizes first, so the
    /// bytes are always canonical and stdlib's range check cannot fail.
    pub fn toStdlib(self: Fe) StdFe {
        return StdFe.fromBytes(self.normalize().toBytes(.big), .big) catch unreachable;
    }
};
431
432// ============================================================
433// tests
434// ============================================================
435
436const testing = std.testing;
437
/// Reference multiply using the stdlib field implementation (test helper).
fn stdFeMul(a: StdFe, b: StdFe) StdFe {
    const product = a.mul(b);
    return product;
}
441
/// Reference squaring using the stdlib field implementation (test helper).
fn stdFeSq(a: StdFe) StdFe {
    const square = a.sq();
    return square;
}
445
test "fromBytes/toBytes round-trip" {
    // Pack the generator's x coordinate and unpack it again; the bytes
    // must survive unchanged.
    const input = Secp256k1.basePoint.affineCoordinates().x.toBytes(.big);
    const repacked = Fe.fromBytes(input, .big).normalize().toBytes(.big);
    try testing.expectEqualSlices(u8, &input, &repacked);
}
452
test "fromBytes/toBytes little-endian round-trip" {
    // Round-trip the generator's x coordinate in little-endian byte order.
    const input = Secp256k1.basePoint.affineCoordinates().x.toBytes(.little);
    const repacked = Fe.fromBytes(input, .little).normalize().toBytes(.little);
    try testing.expectEqualSlices(u8, &input, &repacked);
}
459
test "zero and one" {
    // Sanity checks on the two canonical constants.
    try testing.expect(Fe.zero.isZero());
    try testing.expect(Fe.one.equivalent(Fe.one));
    try testing.expect(!Fe.one.isZero());
    try testing.expect(!Fe.zero.equivalent(Fe.one));
}
466
test "add matches stdlib" {
    // gx + gy computed with our limbs must match the stdlib result.
    const coords = Secp256k1.basePoint.affineCoordinates();
    const lhs = Fe.fromStdlib(coords.x);
    const rhs = Fe.fromStdlib(coords.y);
    const want = coords.x.add(coords.y).toBytes(.big);
    const got = lhs.add(rhs).normalize().toBytes(.big);
    try testing.expectEqualSlices(u8, &want, &got);
}
475
test "sub matches stdlib" {
    // gx - gy computed with our limbs must match the stdlib result.
    const coords = Secp256k1.basePoint.affineCoordinates();
    const lhs = Fe.fromStdlib(coords.x);
    const rhs = Fe.fromStdlib(coords.y);
    const want = coords.x.sub(coords.y).toBytes(.big);
    const got = lhs.sub(rhs).normalize().toBytes(.big);
    try testing.expectEqualSlices(u8, &want, &got);
}
484
test "neg matches stdlib" {
    // -gx must match stdlib's 0 - gx.
    const coords = Secp256k1.basePoint.affineCoordinates();
    const negated = Fe.fromStdlib(coords.x).neg(1).normalize().toBytes(.big);
    const want = StdFe.zero.sub(coords.x).toBytes(.big);
    try testing.expectEqualSlices(u8, &want, &negated);
}
492
test "mul matches stdlib" {
    // gx * gy computed with our limbs must match the stdlib result.
    const coords = Secp256k1.basePoint.affineCoordinates();
    const lhs = Fe.fromStdlib(coords.x);
    const rhs = Fe.fromStdlib(coords.y);
    const want = stdFeMul(coords.x, coords.y).toBytes(.big);
    const got = lhs.mul(rhs).normalize().toBytes(.big);
    try testing.expectEqualSlices(u8, &want, &got);
}
501
test "sq matches stdlib" {
    // gx^2 computed with our limbs must match the stdlib result.
    const coords = Secp256k1.basePoint.affineCoordinates();
    const squared = Fe.fromStdlib(coords.x).sq().normalize().toBytes(.big);
    const want = stdFeSq(coords.x).toBytes(.big);
    try testing.expectEqualSlices(u8, &want, &squared);
}
509
test "sq equals mul(a, a)" {
    // The dedicated squaring path must agree with generic multiplication.
    const coords = Secp256k1.basePoint.affineCoordinates();
    for ([_]StdFe{ coords.x, coords.y }) |std_fe| {
        const fe = Fe.fromStdlib(std_fe);
        try testing.expect(fe.sq().equivalent(fe.mul(fe)));
    }
}
518
test "mul(a, invert(a)) == one" {
    // a * a^-1 must be the multiplicative identity.
    const x = Fe.fromStdlib(Secp256k1.basePoint.affineCoordinates().x);
    try testing.expect(x.mul(x.invert()).equivalent(Fe.one));
}
526
test "mul(a, invert(a)) == one for y" {
    // Same identity check, using the generator's y coordinate.
    const y = Fe.fromStdlib(Secp256k1.basePoint.affineCoordinates().y);
    try testing.expect(y.mul(y.invert()).equivalent(Fe.one));
}
534
test "edge: p-1" {
    // p - 1 is the largest canonical element; adding one must wrap to zero.
    const max_element = Fe.fromInt(
        0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2E,
    );
    try testing.expect(!max_element.isZero());
    try testing.expect(max_element.add(Fe.one).isZero());
}
542
test "fromInt constants" {
    // A non-trivial cube root of unity mod p (presumably the endomorphism
    // beta -- the assertions below only rely on root^3 == 1).
    const root = Fe.fromInt(
        55594575648329892869085402983802832744385952214688224221778511981742606582254,
    );
    try testing.expect(!root.isZero());
    const cubed = root.sq().mul(root);
    try testing.expect(cubed.equivalent(Fe.one));
}
551
test "dbl matches add(self)" {
    // Doubling must be exactly self-addition.
    const x = Fe.fromStdlib(Secp256k1.basePoint.affineCoordinates().x);
    try testing.expect(x.dbl().equivalent(x.add(x)));
}
557
test "mulInt matches repeated add" {
    // 3*a via limb scaling must equal a + a + a.
    const x = Fe.fromStdlib(Secp256k1.basePoint.affineCoordinates().x);
    const tripled = x.add(x).add(x);
    try testing.expect(x.mulInt(3).equivalent(tripled));
}
563
test "multiple mul/sq operations stay consistent" {
    // Chained operations (square then multiply) must track the stdlib.
    const coords = Secp256k1.basePoint.affineCoordinates();
    const x = Fe.fromStdlib(coords.x);
    const y = Fe.fromStdlib(coords.y);

    const got = x.sq().mul(y).normalize().toBytes(.big);
    const want = stdFeSq(coords.x).mul(coords.y).toBytes(.big);
    try testing.expectEqualSlices(u8, &want, &got);
}
573
test "normalize idempotent" {
    // Normalizing an already-normalized element must not change its limbs.
    const x = Fe.fromStdlib(Secp256k1.basePoint.affineCoordinates().x);
    const once = x.normalize();
    const twice = once.normalize();
    try testing.expectEqualSlices(u8, &std.mem.toBytes(once.n), &std.mem.toBytes(twice.n));
}