Mirror of https://github.com/juanfont/headscale.git (synced 2026-01-12 04:10:32 +01:00)

Compare commits: ts2021-imp...web-auth-f (957 commits)
[Commit list: 957 commits, from 70e08462b3 (first listed) to 663e8384a3 (last listed). Only the abbreviated SHA1 column was captured; the Author and Date columns are empty.]
.github/FUNDING.yml (2, vendored)

@@ -1,2 +0,0 @@
ko_fi: kradalby
github: [kradalby]

.github/pull_request_template.md (2, vendored)

@@ -1,6 +1,6 @@
<!-- Please tick if the following things apply. You… -->

- [ ] read the [CONTRIBUTING guidelines](README.md#user-content-contributing)
- [ ] read the [CONTRIBUTING guidelines](README.md#contributing)
- [ ] raised a GitHub issue or discussed it on the projects chat beforehand
- [ ] added unit tests
- [ ] added integration tests

.github/workflows/build.yml (4, vendored)

@@ -13,13 +13,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: tj-actions/changed-files@v34
with:
files: |
*.nix

.github/workflows/contributors.yml (2, vendored)

@@ -9,7 +9,7 @@ jobs:
add-contributors:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Delete upstream contributor branch
# Allow continue on failure to account for when the
# upstream branch is deleted or does not exist.

.github/workflows/lint.yml (10, vendored)

@@ -1,5 +1,5 @@
---
name: CI
name: Lint

on: [push, pull_request]

@@ -7,13 +7,13 @@ jobs:
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
@@ -26,7 +26,7 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
uses: golangci/golangci-lint-action@v2
with:
version: latest
version: v1.49.0

# Only block PRs on new problems.
# If this is not enabled, we will end up having PRs
@@ -70,7 +70,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: bufbuild/buf-setup-action@v0.7.0
- uses: bufbuild/buf-setup-action@v1.7.0
- uses: bufbuild/buf-lint-action@v1
with:
input: "proto"

.github/workflows/release.yml (20, vendored)

@@ -1,5 +1,5 @@
---
name: release
name: Release

on:
push:
@@ -12,13 +12,13 @@ jobs:
runs-on: ubuntu-18.04 # due to CGO we need to user an older version
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.18.0
go-version: 1.19.0

- name: Install dependencies
run: |
@@ -37,7 +37,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
@@ -89,6 +89,8 @@ jobs:
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
build-args: |
VERSION=${{ steps.meta.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache
@@ -98,7 +100,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
@@ -153,6 +155,8 @@ jobs:
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache-debug
cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
build-args: |
VERSION=${{ steps.meta-debug.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache-debug
@@ -162,7 +166,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
@@ -217,6 +221,8 @@ jobs:
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache-alpine
cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
build-args: |
VERSION=${{ steps.meta-alpine.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache-alpine

.github/workflows/renovatebot.yml (2, vendored)

@@ -16,7 +16,7 @@ jobs:
APP_ID: ${{ secrets.RENOVATEBOT_APP_ID }}

- name: Checkout
uses: actions/checkout@v2.0.0
uses: actions/checkout@v3

- name: Self-hosted Renovate
uses: renovatebot/github-action@v31.81.3

.github/workflows/test-integration-cli.yml (35, vendored, Normal file)

@@ -0,0 +1,35 @@
name: Integration Test CLI

on: [pull_request]

jobs:
integration-test-cli:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_cli

.github/workflows/test-integration-derp.yml (35, vendored, Normal file)

@@ -0,0 +1,35 @@
name: Integration Test DERP

on: [pull_request]

jobs:
integration-test-derp:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run Embedded DERP server integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_derp

.github/workflows/test-integration-oidc.yml (35, vendored, Normal file)

@@ -0,0 +1,35 @@
name: Integration Test OIDC

on: [pull_request]

jobs:
integration-test-oidc:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run OIDC integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_oidc

[file header missing]

@@ -1,9 +1,9 @@
name: CI
name: Integration Test v2

on: [pull_request]

jobs:
integration-test:
integration-test-v2:
runs-on: ubuntu-latest

steps:
@@ -11,6 +11,11 @@ jobs:
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
@@ -25,6 +30,6 @@ jobs:
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run Integration tests
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration
run: nix develop --command -- make test_integration_v2_auth_web_flow

.github/workflows/test-integration-v2-kradalby.yml (27, vendored, Normal file)

@@ -0,0 +1,27 @@
name: Integration Test v2 - kradalby

on: [pull_request]

jobs:
integration-test-v2-kradalby:
runs-on: [self-hosted, linux, x64, nixos, docker]

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_v2_general

.github/workflows/test.yml (6, vendored)

@@ -1,4 +1,4 @@
name: CI
name: Tests

on: [push, pull_request]

@@ -7,13 +7,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: tj-actions/changed-files@v34
with:
files: |
*.nix

.gitignore (2, vendored)

@@ -31,3 +31,5 @@ test_output/
# Nix build output
result
.direnv/

integration_test/etc/config.dump.yaml

[file header missing]

@@ -1,6 +1,8 @@
---
run:
timeout: 10m
build-tags:
- ts2019

issues:
skip-dirs:
@@ -24,6 +26,9 @@ linters:
- tagliatelle
- godox
- ireturn
- execinquery
- exhaustruct
- nolintlint

# We should strive to enable these:
- wrapcheck
@@ -31,6 +36,9 @@ linters:
- makezero
- maintidx

# Limits the methods of an interface to 10. We have more in integration tests
- interfacebloat

# We might want to enable this, but it might be a lot of work
- cyclop
- nestif

[file header missing]

@@ -1,7 +1,7 @@
---
before:
hooks:
- go mod tidy -compat=1.17
- go mod tidy -compat=1.19

release:
prerelease: auto
@@ -20,22 +20,24 @@ builds:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

- id: linux-armhf
- id: darwin-arm64
main: ./cmd/headscale/headscale.go
mod_timestamp: "{{ .CommitTimestamp }}"
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
goarch:
- arm
goarm:
- "7"
- arm64
flags:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

- id: linux-amd64
mod_timestamp: "{{ .CommitTimestamp }}"
@@ -48,6 +50,8 @@ builds:
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

- id: linux-arm64
mod_timestamp: "{{ .CommitTimestamp }}"
@@ -60,12 +64,14 @@ builds:
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

archives:
- id: golang-cross
builds:
- darwin-amd64
- linux-armhf
- darwin-arm64
- linux-amd64
- linux-arm64
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"

CHANGELOG.md (89)

@@ -1,12 +1,97 @@
# CHANGELOG

## 0.16.0 (2022-xx-xx)
## 0.17.0 (2022-XX-XX)

### BREAKING

- Log level option `log_level` was moved to a distinct `log` config section and renamed to `level` [#768](https://github.com/juanfont/headscale/pull/768)

### Changes

- Added support for Tailscale TS2021 protocol [#738](https://github.com/juanfont/headscale/pull/738)
- Add ability to specify config location via env var `HEADSCALE_CONFIG` [#674](https://github.com/juanfont/headscale/issues/674)
- Target Go 1.19 for Headscale [#778](https://github.com/juanfont/headscale/pull/778)
- Target Tailscale v1.30.0 to build Headscale [#780](https://github.com/juanfont/headscale/pull/780)
- Give a warning when running Headscale with reverse proxy improperly configured for WebSockets [#788](https://github.com/juanfont/headscale/pull/788)
- Fix subnet routers with Primary Routes [#811](https://github.com/juanfont/headscale/pull/811)
- Added support for JSON logs [#653](https://github.com/juanfont/headscale/issues/653)
- Sanitise the node key passed to registration url [#823](https://github.com/juanfont/headscale/pull/823)
- Add support for generating pre-auth keys with tags [#767](https://github.com/juanfont/headscale/pull/767)
- Add support for evaluating `autoApprovers` ACL entries when a machine is registered [#763](https://github.com/juanfont/headscale/pull/763)
- Add config flag to allow Headscale to start if OIDC provider is down [#829](https://github.com/juanfont/headscale/pull/829)
- Fix prefix length comparison bug in AutoApprovers route evaluation [#862](https://github.com/juanfont/headscale/pull/862)
- Random node DNS suffix only applied if names collide in namespace. [#766](https://github.com/juanfont/headscale/issues/766)
- Remove `ip_prefix` configuration option and warning [#899](https://github.com/juanfont/headscale/pull/899)
- Add `dns_config.override_local_dns` option [#905](https://github.com/juanfont/headscale/pull/905)
- Fix some DNS config issues [#660](https://github.com/juanfont/headscale/issues/660)
- Make it possible to disable TS2019 with build flag [#928](https://github.com/juanfont/headscale/pull/928)

## 0.16.4 (2022-08-21)

### Changes

- Add ability to connect to PostgreSQL over TLS/SSL [#745](https://github.com/juanfont/headscale/pull/745)
- Fix CLI registration of expired machines [#754](https://github.com/juanfont/headscale/pull/754)

## 0.16.3 (2022-08-17)

### Changes

- Fix issue with OIDC authentication [#747](https://github.com/juanfont/headscale/pull/747)

## 0.16.2 (2022-08-14)

### Changes

- Fixed bugs in the client registration process after migration to NodeKey [#735](https://github.com/juanfont/headscale/pull/735)

## 0.16.1 (2022-08-12)

### Changes

- Updated dependencies (including the library that lacked armhf support) [#722](https://github.com/juanfont/headscale/pull/722)
- Fix missing group expansion in function `excludeCorretlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563)
- Improve registration protocol implementation and switch to NodeKey as main identifier [#725](https://github.com/juanfont/headscale/pull/725)
- Add ability to connect to PostgreSQL via unix socket [#734](https://github.com/juanfont/headscale/pull/734)

## 0.16.0 (2022-07-25)

**Note:** Take a backup of your database before upgrading.

### BREAKING

- Old ACL syntax is no longer supported ("users" & "ports" -> "src" & "dst"). Please check [the new syntax](https://tailscale.com/kb/1018/acls/).

### Changes

- **Drop** armhf (32-bit ARM) support. [#609](https://github.com/juanfont/headscale/pull/609)
- Headscale fails to serve if the ACL policy file cannot be parsed [#537](https://github.com/juanfont/headscale/pull/537)
- Fix labels cardinality error when registering unknown pre-auth key [#519](https://github.com/juanfont/headscale/pull/519)
- Fix send on closed channel crash in polling [#542](https://github.com/juanfont/headscale/pull/542)
- Fixed spurious calls to setLastStateChangeToNow from ephemeral nodes [#566](https://github.com/juanfont/headscale/pull/566)
- Add command for moving nodes between namespaces [#362](https://github.com/juanfont/headscale/issues/362)
- Added more configuration parameters for OpenID Connect (scopes, free-form paramters, domain and user allowlist)
- Add command to set tags on a node [#525](https://github.com/juanfont/headscale/issues/525)
- Add command to view tags of nodes [#356](https://github.com/juanfont/headscale/issues/356)
- Add --all (-a) flag to enable routes command [#360](https://github.com/juanfont/headscale/issues/360)
- Fix issue where nodes was not updated across namespaces [#560](https://github.com/juanfont/headscale/pull/560)
- Add the ability to rename a nodes name [#560](https://github.com/juanfont/headscale/pull/560)
- Node DNS names are now unique, a random suffix will be added when a node joins
- This change contains database changes, remember to **backup** your database before upgrading
- Add option to enable/disable logtail (Tailscale's logging infrastructure) [#596](https://github.com/juanfont/headscale/pull/596)
- This change disables the logs by default
- Use [Prometheus]'s duration parser, supporting days (`d`), weeks (`w`) and years (`y`) [#598](https://github.com/juanfont/headscale/pull/598)
- Add support for reloading ACLs with SIGHUP [#601](https://github.com/juanfont/headscale/pull/601)
- Use new ACL syntax [#618](https://github.com/juanfont/headscale/pull/618)
- Add -c option to specify config file from command line [#285](https://github.com/juanfont/headscale/issues/285) [#612](https://github.com/juanfont/headscale/pull/601)
- Add configuration option to allow Tailscale clients to use a random WireGuard port. [kb/1181/firewalls](https://tailscale.com/kb/1181/firewalls) [#624](https://github.com/juanfont/headscale/pull/624)
- Improve obtuse UX regarding missing configuration (`ephemeral_node_inactivity_timeout` not set) [#639](https://github.com/juanfont/headscale/pull/639)
- Fix nodes being shown as 'offline' in `tailscale status` [#648](https://github.com/juanfont/headscale/pull/648)
- Improve shutdown behaviour [#651](https://github.com/juanfont/headscale/pull/651)
- Drop Gin as web framework in Headscale [648](https://github.com/juanfont/headscale/pull/648) [677](https://github.com/juanfont/headscale/pull/677)
- Make tailnet node updates check interval configurable [#675](https://github.com/juanfont/headscale/pull/675)
- Fix regression with HTTP API [#684](https://github.com/juanfont/headscale/pull/684)
- nodes ls now print both Hostname and Name(Issue [#647](https://github.com/juanfont/headscale/issues/647) PR [#687](https://github.com/juanfont/headscale/pull/687))

## 0.15.0 (2022-03-20)

@@ -77,7 +162,7 @@ This is a part of aligning `headscale`'s behaviour with Tailscale's upstream beh
- OpenID Connect users will be mapped per namespaces
- Each user will get its own namespace, created if it does not exist
- `oidc.domain_map` option has been removed
- `strip_email_domain` option has been added (see [config-example.yaml](./config_example.yaml))
- `strip_email_domain` option has been added (see [config-example.yaml](./config-example.yaml))

### Changes

CODE_OF_CONDUCT.md (134, Normal file)

@@ -0,0 +1,134 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation
in our community a harassment-free experience for everyone, regardless
of age, body size, visible or invisible disability, ethnicity, sex
characteristics, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance,
race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open,
welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for
our community include:

- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our
mistakes, and learning from the experience
- Focusing on what is best not just for us as individuals, but for the
overall community

Examples of unacceptable behavior include:

- The use of sexualized language or imagery, and sexual attention or
advances of any kind
- Trolling, insulting or derogatory comments, and personal or
political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
address, without their explicit permission
- Other conduct which could reasonably be considered inappropriate in
a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our
standards of acceptable behavior and will take appropriate and fair
corrective action in response to any behavior that they deem
inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit,
or reject comments, commits, code, wiki edits, issues, and other
contributions that are not aligned to this Code of Conduct, and will
communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also
applies when an individual is officially representing the community in
public spaces. Examples of representing our community include using an
official e-mail address, posting via an official social media account,
or acting as an appointed representative at an online or offline
event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported to the community leaders responsible for enforcement
at our Discord channel. All complaints
will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and
security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in
determining the consequences for any action they deem in violation of
this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior
deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders,
providing clarity around the nature of the violation and an
explanation of why the behavior was inappropriate. A public apology
may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued
behavior. No interaction with the people involved, including
unsolicited interaction with those enforcing the Code of Conduct, for
a specified period of time. This includes avoiding interactions in
community spaces as well as external channels like social
media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards,
including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or
public communication with the community for a specified period of
time. No public or private interaction with the people involved,
including unsolicited interaction with those enforcing the Code of
Conduct, is allowed during this period. Violating these terms may lead
to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of
community standards, including sustained inappropriate behavior,
harassment of an individual, or aggression toward or disparagement of
classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction
within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor
Covenant][homepage], version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of
conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the
FAQ at https://www.contributor-covenant.org/faq. Translations are
available at https://www.contributor-covenant.org/translations.

[file header missing]

@@ -1,5 +1,6 @@
# Builder image
FROM docker.io/golang:1.18.0-bullseye AS build
FROM docker.io/golang:1.19.0-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -8,7 +9,7 @@ RUN go mod download

COPY . .

RUN GGO_ENABLED=0 GOOS=linux go install -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

[file header missing]

@@ -1,5 +1,6 @@
# Builder image
FROM docker.io/golang:1.18.0-alpine AS build
FROM docker.io/golang:1.19.0-alpine AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -9,7 +10,7 @@ RUN go mod download

COPY . .

RUN GGO_ENABLED=0 GOOS=linux go install -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

[file header missing]

@@ -1,5 +1,6 @@
# Builder image
FROM docker.io/golang:1.18.0-bullseye AS build
FROM docker.io/golang:1.19.0-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -8,11 +9,11 @@ RUN go mod download

COPY . .

RUN GGO_ENABLED=0 GOOS=linux go install -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN test -e /go/bin/headscale

# Debug image
FROM gcr.io/distroless/base-debian11:debug
FROM docker.io/golang:1.19.0-bullseye

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

[file header missing]

@@ -7,7 +7,9 @@ RUN apt-get update \

RUN git clone https://github.com/tailscale/tailscale.git

WORKDIR tailscale
WORKDIR /go/tailscale

RUN git checkout main

RUN sh build_dist.sh tailscale.com/cmd/tailscale
RUN sh build_dist.sh tailscale.com/cmd/tailscaled

Makefile (67)

@@ -1,8 +1,17 @@
# Calculate version
version = $(git describe --always --tags --dirty)
version ?= $(shell git describe --always --tags --dirty)

rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))

# Determine if OS supports pie
GOOS ?= $(shell uname | tr '[:upper:]' '[:lower:]')
ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
pieflags = -buildmode=pie
else
endif

TAGS = -tags ts2019

# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)
@@ -10,21 +19,65 @@ PROTO_SOURCES = $(call rwildcard,,*.proto)

build:
CGO_ENABLED=0 go build -trimpath -buildmode=pie -mod=readonly -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go
nix build

dev: lint test build

test:
@go test -coverprofile=coverage.out ./...
@go test $(TAGS) -short -coverprofile=coverage.out ./...

test_integration:
go test -failfast -tags integration -timeout 30m -count=1 ./...
test_integration: test_integration_cli test_integration_derp test_integration_oidc test_integration_v2_general

test_integration_cli:
go test -tags integration -v integration_cli_test.go integration_common_test.go
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...

test_integration_derp:
go test -tags integration -v integration_embedded_derp_test.go integration_common_test.go
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...

test_integration_oidc:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationOIDC ./...

test_integration_v2_general:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
-v $$PWD:$$PWD -w $$PWD/integration \
-v /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test $(TAGS) -failfast ./... -timeout 60m -parallel 6

test_integration_v2_auth_web_flow:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
-v $$PWD:$$PWD -w $$PWD/integration \
-v /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... -timeout 60m -parallel 6 -run TestAuthWebFlow

coverprofile_func:
go tool cover -func=coverage.out

README.md (311)
@@ -1,4 +1,4 @@
|
||||
# headscale
|
||||

|
||||
|
||||

|
||||
|
||||
@@ -28,7 +28,7 @@ and exposes the advertised routes of your nodes.
|
||||
|
||||
A [Tailscale network (tailnet)](https://tailscale.com/kb/1136/tailnet/) is private
|
||||
network which Tailscale assigns to a user in terms of private users or an
|
||||
organisations.
|
||||
organisation.
|
||||
|
||||
## Design goal
|
||||
|
||||
@@ -67,15 +67,15 @@ one of the maintainers.
|
||||
|
||||
## Client OS support
|
||||
|
||||
| OS | Supports headscale |
|
||||
| ------- | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| Linux | Yes |
|
||||
| OpenBSD | Yes |
|
||||
| FreeBSD | Yes |
|
||||
| macOS | Yes (see `/apple` on your headscale for more information) |
|
||||
| Windows | Yes [docs](./docs/windows-client.md) |
|
||||
| Android | [You need to compile the client yourself](https://github.com/juanfont/headscale/issues/58#issuecomment-885255270) |
|
||||
| iOS | Not yet |
|
||||
| OS | Supports headscale |
|
||||
| ------- | --------------------------------------------------------- |
|
||||
| Linux | Yes |
|
||||
| OpenBSD | Yes |
|
||||
| FreeBSD | Yes |
|
||||
| macOS | Yes (see `/apple` on your headscale for more information) |
|
||||
| Windows | Yes [docs](./docs/windows-client.md) |
|
||||
| Android | Yes [docs](./docs/android-client.md) |
|
||||
| iOS | Not yet |
|
||||
|
||||
## Running headscale
|
||||
|
||||
@@ -189,10 +189,26 @@ make build

[Contributors grid (README.md): the remaining hunks in this file update the HTML table of contributor avatars — new contributor cells are added and the existing rows are reflowed. Visible label changes: the "ohdearaugustin" cell is replaced by "Jiang Zhu" (huskyii) and an "ohdearaugustin" cell reappears further down; "Hoàng Đức Hiếu" is relabelled "hdhoang"; "WhiteSource Renovate" becomes "Mend Renovate"; one "Stefan Majer" cell is replaced by "Victor Freire" (ratsclub); the "ZiYuan" cell becomes "Zhiyuan Zheng" (zhzy0077) and a "bravechamp" cell becomes "Ziyuan Han" (Bpazy); the "lion24" avatar alt text changes to "sharkonet". Cells appearing in the added rows include GrigoriyMikhalkin, Mike Lloyd, Azz, Igor Perepilitsyn, Laurent Marchaud, bravechamp, Deon Thomas, Jamie Greeff, ChibangLW, Samuel Lock, Stefan Majer, kevinlin, LiuHanCheng, Pavlos Vinieratos, Andrei Pechkurov, Antoine POPINEAU, kundel, Jonathan de Jong, Pontus N, Rasmus Moorats, Stefan VanBuren, sophware, Till Hoffmann, Yujie Xia and phpmalik. Unchanged context cells include Benjamin Roberts, Nico, e-zk, Justin Angel, Alessandro (Ale) Segala, unreality, Moritz Poldrack, Niek van der Maas, Eugen Biegler, Anton Schubert, Aaron Bieber, Fernando De Lucchi, Paul Tötterman, Artem Klevtsov, Casey Marshall, Silver Bullet, lachy2849, thomas, Abraham Ingersoll, Aofei Sheng, Bryan Stenson, Carson Yang, Felix Kronlage-Dammers, JJGadgets, Jim Tittsler, Pierre Carru, rcursaru, Ryan Fowler, Shaanan Cohney, Tanner, Tianon Gravi, Tjerk Woudsma, Yang Bin, Zakhar Bessarab, ignoramous, pernila and Wakeful-Cloud.]
169  acls.go

@@ -2,8 +2,10 @@ package headscale

import (
	"encoding/json"
+	"errors"
	"fmt"
	"io"
+	"net/netip"
	"os"
	"path/filepath"
	"strconv"

@@ -12,7 +14,6 @@ import (
	"github.com/rs/zerolog/log"
	"github.com/tailscale/hujson"
	"gopkg.in/yaml.v3"
-	"inet.af/netaddr"
	"tailscale.com/tailcfg"
)

@@ -22,6 +23,7 @@ const (
	errInvalidGroup      = Error("invalid group")
	errInvalidTag        = Error("invalid tag")
	errInvalidPortFormat = Error("invalid port format")
+	errWildcardIsNeeded  = Error("wildcard as port is required for the protocol")
)

const (

@@ -35,6 +37,23 @@ const (
	expectedTokenItems = 2
)

+// For some reason golang.org/x/net/internal/iana is an internal package.
+const (
+	protocolICMP     = 1   // Internet Control Message
+	protocolIGMP     = 2   // Internet Group Management
+	protocolIPv4     = 4   // IPv4 encapsulation
+	protocolTCP      = 6   // Transmission Control
+	protocolEGP      = 8   // Exterior Gateway Protocol
+	protocolIGP      = 9   // any private interior gateway (used by Cisco for their IGRP)
+	protocolUDP      = 17  // User Datagram
+	protocolGRE      = 47  // Generic Routing Encapsulation
+	protocolESP      = 50  // Encap Security Payload
+	protocolAH       = 51  // Authentication Header
+	protocolIPv6ICMP = 58  // ICMP for IPv6
+	protocolSCTP     = 132 // Stream Control Transmission Protocol
+	ProtocolFC       = 133 // Fibre Channel
+)

// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules.
func (h *Headscale) LoadACLPolicy(path string) error {
	log.Debug().

@@ -122,23 +141,36 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
		}

		srcIPs := []string{}
-		for innerIndex, user := range acl.Users {
-			srcs, err := h.generateACLPolicySrcIP(machines, *h.aclPolicy, user)
+		for innerIndex, src := range acl.Sources {
+			srcs, err := h.generateACLPolicySrcIP(machines, *h.aclPolicy, src)
			if err != nil {
				log.Error().
-					Msgf("Error parsing ACL %d, User %d", index, innerIndex)
+					Msgf("Error parsing ACL %d, Source %d", index, innerIndex)

				return nil, err
			}
			srcIPs = append(srcIPs, srcs...)
		}

+		protocols, needsWildcard, err := parseProtocol(acl.Protocol)
+		if err != nil {
+			log.Error().
+				Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol)
+
+			return nil, err
+		}

		destPorts := []tailcfg.NetPortRange{}
-		for innerIndex, ports := range acl.Ports {
-			dests, err := h.generateACLPolicyDestPorts(machines, *h.aclPolicy, ports)
+		for innerIndex, dest := range acl.Destinations {
+			dests, err := h.generateACLPolicyDest(
+				machines,
+				*h.aclPolicy,
+				dest,
+				needsWildcard,
+			)
			if err != nil {
				log.Error().
-					Msgf("Error parsing ACL %d, Port %d", index, innerIndex)
+					Msgf("Error parsing ACL %d, Destination %d", index, innerIndex)

				return nil, err
			}

@@ -148,6 +180,7 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
		rules = append(rules, tailcfg.FilterRule{
			SrcIPs:   srcIPs,
			DstPorts: destPorts,
+			IPProto:  protocols,
		})
	}
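
For orientation, this is roughly the shape of the filter rule the updated generateACLRules emits for a single ACL entry. It is a minimal sketch using tailcfg directly, not headscale's own code, and the concrete addresses and ports are made up for illustration.

```go
package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	// Hypothetical result for an ACL entry roughly equivalent to:
	// accept, sources ["100.64.0.1"], destinations ["100.64.0.2:80"], proto "tcp".
	rule := tailcfg.FilterRule{
		SrcIPs: []string{"100.64.0.1"},
		DstPorts: []tailcfg.NetPortRange{
			{IP: "100.64.0.2", Ports: tailcfg.PortRange{First: 80, Last: 80}},
		},
		// IPProto is the field newly populated from parseProtocol; 6 is TCP.
		IPProto: []int{6},
	}
	fmt.Printf("%+v\n", rule)
}
```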
@@ -157,17 +190,18 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
func (h *Headscale) generateACLPolicySrcIP(
	machines []Machine,
	aclPolicy ACLPolicy,
-	u string,
+	src string,
) ([]string, error) {
-	return expandAlias(machines, aclPolicy, u, h.cfg.OIDC.StripEmaildomain)
+	return expandAlias(machines, aclPolicy, src, h.cfg.OIDC.StripEmaildomain)
}

-func (h *Headscale) generateACLPolicyDestPorts(
+func (h *Headscale) generateACLPolicyDest(
	machines []Machine,
	aclPolicy ACLPolicy,
-	d string,
+	dest string,
+	needsWildcard bool,
) ([]tailcfg.NetPortRange, error) {
-	tokens := strings.Split(d, ":")
+	tokens := strings.Split(dest, ":")
	if len(tokens) < expectedTokenItems || len(tokens) > 3 {
		return nil, errInvalidPortFormat
	}

@@ -194,7 +228,7 @@ func (h *Headscale) generateACLPolicyDestPorts(
	if err != nil {
		return nil, err
	}
-	ports, err := expandPorts(tokens[len(tokens)-1])
+	ports, err := expandPorts(tokens[len(tokens)-1], needsWildcard)
	if err != nil {
		return nil, err
	}

@@ -213,6 +247,61 @@ func (h *Headscale) generateACLPolicyDestPorts(
	return dests, nil
}

+// parseProtocol reads the proto field of the ACL and generates a list of
+// protocols that will be allowed, following the IANA IP protocol number
+// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+//
+// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
+// as per Tailscale behaviour (see tailcfg.FilterRule).
+//
+// Also returns a boolean indicating if the protocol
+// requires all the destinations to use wildcard as port number (only TCP,
+// UDP and SCTP support specifying ports).
+func parseProtocol(protocol string) ([]int, bool, error) {
+	switch protocol {
+	case "":
+		return []int{
+			protocolICMP,
+			protocolIPv6ICMP,
+			protocolTCP,
+			protocolUDP,
+		}, false, nil
+	case "igmp":
+		return []int{protocolIGMP}, true, nil
+	case "ipv4", "ip-in-ip":
+		return []int{protocolIPv4}, true, nil
+	case "tcp":
+		return []int{protocolTCP}, false, nil
+	case "egp":
+		return []int{protocolEGP}, true, nil
+	case "igp":
+		return []int{protocolIGP}, true, nil
+	case "udp":
+		return []int{protocolUDP}, false, nil
+	case "gre":
+		return []int{protocolGRE}, true, nil
+	case "esp":
+		return []int{protocolESP}, true, nil
+	case "ah":
+		return []int{protocolAH}, true, nil
+	case "sctp":
+		return []int{protocolSCTP}, false, nil
+	case "icmp":
+		return []int{protocolICMP, protocolIPv6ICMP}, true, nil
+
+	default:
+		protocolNumber, err := strconv.Atoi(protocol)
+		if err != nil {
+			return nil, false, err
+		}
+		needsWildcard := protocolNumber != protocolTCP &&
+			protocolNumber != protocolUDP &&
+			protocolNumber != protocolSCTP
+
+		return []int{protocolNumber}, needsWildcard, nil
+	}
+}
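
To make the wildcard rule concrete, here is a small standalone sketch of the outputs one would expect from the function above for a few proto values. The values are read directly off the switch shown; the snippet is an illustration, not a test from the repository.

```go
package main

import "fmt"

func main() {
	// Expected behaviour of parseProtocol, per the switch above:
	//   ""    -> [1 58 6 17], wildcard not required (ICMPv4, ICMPv6, TCP, UDP)
	//   "tcp" -> [6],         wildcard not required (ports may be listed)
	//   "gre" -> [47],        wildcard required ("*" must be used as the port)
	//   "94"  -> [94],        wildcard required (numeric protocols other than TCP/UDP/SCTP)
	examples := map[string]struct {
		protos        []int
		needsWildcard bool
	}{
		"":    {[]int{1, 58, 6, 17}, false},
		"tcp": {[]int{6}, false},
		"gre": {[]int{47}, true},
		"94":  {[]int{94}, true},
	}
	for proto, want := range examples {
		fmt.Printf("proto=%q -> %v (wildcard required: %v)\n", proto, want.protos, want.needsWildcard)
	}
}
```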

// expandalias has an input of either
// - a namespace
// - a group

@@ -249,18 +338,38 @@ func expandAlias(
	}

	if strings.HasPrefix(alias, "tag:") {
+		// check for forced tags
+		for _, machine := range machines {
+			if contains(machine.ForcedTags, alias) {
+				ips = append(ips, machine.IPAddresses.ToStringSlice()...)
+			}
+		}
+
+		// find tag owners
		owners, err := expandTagOwners(aclPolicy, alias, stripEmailDomain)
		if err != nil {
-			return ips, err
+			if errors.Is(err, errInvalidTag) {
+				if len(ips) == 0 {
+					return ips, fmt.Errorf(
+						"%w. %v isn't owned by a TagOwner and no forced tags are defined",
+						errInvalidTag,
+						alias,
+					)
+				}
+
+				return ips, nil
+			} else {
+				return ips, err
+			}
		}

		// filter out machines per tag owner
		for _, namespace := range owners {
			machines := filterMachinesByNamespace(machines, namespace)
			for _, machine := range machines {
				hi := machine.GetHostInfo()
-				for _, t := range hi.RequestTags {
-					if alias == t {
-						ips = append(ips, machine.IPAddresses.ToStringSlice()...)
-					}
+				if contains(hi.RequestTags, alias) {
+					ips = append(ips, machine.IPAddresses.ToStringSlice()...)
				}
			}
		}

@@ -270,7 +379,7 @@ func expandAlias(

	// if alias is a namespace
	nodes := filterMachinesByNamespace(machines, alias)
-	nodes = excludeCorrectlyTaggedNodes(aclPolicy, nodes, alias)
+	nodes = excludeCorrectlyTaggedNodes(aclPolicy, nodes, alias, stripEmailDomain)

	for _, n := range nodes {
		ips = append(ips, n.IPAddresses.ToStringSlice()...)

@@ -285,13 +394,13 @@ func expandAlias(
	}

	// if alias is an IP
-	ip, err := netaddr.ParseIP(alias)
+	ip, err := netip.ParseAddr(alias)
	if err == nil {
		return []string{ip.String()}, nil
	}

	// if alias is an CIDR
-	cidr, err := netaddr.ParseIPPrefix(alias)
+	cidr, err := netip.ParsePrefix(alias)
	if err == nil {
		return []string{cidr.String()}, nil
	}
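
The expandAlias hunks above show the order in which an alias is resolved: wildcard, group, tag (forced tags first, then tag owners), namespace, and finally a literal IP or CIDR. The sketch below only classifies which branch an alias string would take; it is an illustrative stand-in, not code from the repository, and it deliberately skips the machine/namespace/Hosts lookups that the real function performs before falling back to IP and CIDR parsing.

```go
package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// classifyAlias mirrors the dispatch order of expandAlias above,
// minus the data lookups (groups, namespaces, Hosts).
func classifyAlias(alias string) string {
	switch {
	case alias == "*":
		return "wildcard"
	case strings.HasPrefix(alias, "group:"):
		return "group"
	case strings.HasPrefix(alias, "tag:"):
		return "tag (forced tags first, then tag owners)"
	}
	if _, err := netip.ParseAddr(alias); err == nil {
		return "single IP"
	}
	if _, err := netip.ParsePrefix(alias); err == nil {
		return "CIDR"
	}
	return "namespace or host name"
}

func main() {
	for _, alias := range []string{"*", "group:admins", "tag:hr-webserver", "100.64.0.1", "192.168.1.0/24", "joe"} {
		fmt.Printf("%-18s -> %s\n", alias, classifyAlias(alias))
	}
}
```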

@@ -308,11 +417,14 @@ func excludeCorrectlyTaggedNodes(
	aclPolicy ACLPolicy,
	nodes []Machine,
	namespace string,
+	stripEmailDomain bool,
) []Machine {
	out := []Machine{}
	tags := []string{}
-	for tag, ns := range aclPolicy.TagOwners {
-		if containsString(ns, namespace) {
+	for tag := range aclPolicy.TagOwners {
+		owners, _ := expandTagOwners(aclPolicy, namespace, stripEmailDomain)
+		ns := append(owners, namespace)
+		if contains(ns, namespace) {
			tags = append(tags, tag)
		}
	}

@@ -322,12 +434,15 @@ func excludeCorrectlyTaggedNodes(

		found := false
		for _, t := range hi.RequestTags {
-			if containsString(tags, t) {
+			if contains(tags, t) {
				found = true

				break
			}
		}
+		if len(machine.ForcedTags) > 0 {
+			found = true
+		}
		if !found {
			out = append(out, machine)
		}

@@ -336,13 +451,17 @@ func excludeCorrectlyTaggedNodes(
	return out
}

-func expandPorts(portsStr string) (*[]tailcfg.PortRange, error) {
+func expandPorts(portsStr string, needsWildcard bool) (*[]tailcfg.PortRange, error) {
	if portsStr == "*" {
		return &[]tailcfg.PortRange{
			{First: portRangeBegin, Last: portRangeEnd},
		}, nil
	}

+	if needsWildcard {
+		return nil, errWildcardIsNeeded
+	}

	ports := []tailcfg.PortRange{}
	for _, portStr := range strings.Split(portsStr, ",") {
		rang := strings.Split(portStr, "-")
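
The test updates that follow exercise the renamed ACL fields. As a minimal sketch of the shape change only — using local stand-in types, not headscale's real ACLPolicy/ACL definitions — the old Users/Ports pair becomes Sources/Destinations, alongside the Protocol field read by parseProtocol:

```go
package main

import "fmt"

// Stand-in types for illustration. Field names follow the diff above;
// everything else (other fields, struct tags) is simplified.
type ACL struct {
	Action       string
	Sources      []string // was Users
	Destinations []string // was Ports
	Protocol     string   // empty means ICMPv4/ICMPv6/TCP/UDP
}

type ACLPolicy struct {
	ACLs []ACL
}

func main() {
	policy := ACLPolicy{
		ACLs: []ACL{
			{
				Action:       "accept",
				Sources:      []string{"group:admins"},
				Destinations: []string{"tag:webapp:80,443"},
				Protocol:     "tcp",
			},
		},
	}
	fmt.Printf("%+v\n", policy)
}
```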
399  acls_test.go

@@ -2,11 +2,11 @@ package headscale

import (
	"errors"
+	"net/netip"
	"reflect"
	"testing"

	"gopkg.in/check.v1"
-	"inet.af/netaddr"
	"tailscale.com/tailcfg"
)

@@ -62,7 +62,11 @@ func (s *Suite) TestBasicRule(c *check.C) {
func (s *Suite) TestInvalidAction(c *check.C) {
	app.aclPolicy = &ACLPolicy{
		ACLs: []ACL{
-			{Action: "invalidAction", Users: []string{"*"}, Ports: []string{"*:*"}},
+			{
+				Action:       "invalidAction",
+				Sources:      []string{"*"},
+				Destinations: []string{"*:*"},
+			},
		},
	}
	err := app.UpdateACLRules()

@@ -70,14 +74,18 @@ func (s *Suite) TestInvalidAction(c *check.C) {
}

func (s *Suite) TestInvalidGroupInGroup(c *check.C) {
-	// this ACL is wrong because the group in users sections doesn't exist
+	// this ACL is wrong because the group in Sources sections doesn't exist
	app.aclPolicy = &ACLPolicy{
		Groups: Groups{
			"group:test":  []string{"foo"},
			"group:error": []string{"foo", "group:test"},
		},
		ACLs: []ACL{
-			{Action: "accept", Users: []string{"group:error"}, Ports: []string{"*:*"}},
+			{
+				Action:       "accept",
+				Sources:      []string{"group:error"},
+				Destinations: []string{"*:*"},
+			},
		},
	}
	err := app.UpdateACLRules()

@@ -88,7 +96,11 @@ func (s *Suite) TestInvalidTagOwners(c *check.C) {
	// this ACL is wrong because no tagOwners own the requested tag for the server
	app.aclPolicy = &ACLPolicy{
		ACLs: []ACL{
-			{Action: "accept", Users: []string{"tag:foo"}, Ports: []string{"*:*"}},
+			{
+				Action:       "accept",
+				Sources:      []string{"tag:foo"},
+				Destinations: []string{"*:*"},
+			},
		},
	}
	err := app.UpdateACLRules()

@@ -97,12 +109,12 @@ func (s *Suite) TestInvalidTagOwners(c *check.C) {

// this test should validate that we can expand a group in a TagOWner section and
// match properly the IP's of the related hosts. The owner is valid and the tag is also valid.
-// the tag is matched in the Users section.
-func (s *Suite) TestValidExpandTagOwnersInUsers(c *check.C) {
+// the tag is matched in the Sources section.
+func (s *Suite) TestValidExpandTagOwnersInSources(c *check.C) {
	namespace, err := app.CreateNamespace("user1")
	c.Assert(err, check.IsNil)

-	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
+	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("user1", "testmachine")

@@ -118,8 +130,8 @@ func (s *Suite) TestValidExpandTagOwnersInUsers(c *check.C) {
		MachineKey: "foo",
		NodeKey:    "bar",
		DiscoKey:   "faa",
-		Name:        "testmachine",
-		IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
+		Hostname:    "testmachine",
+		IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),

@@ -131,7 +143,11 @@ func (s *Suite) TestValidExpandTagOwnersInUsers(c *check.C) {
		Groups:    Groups{"group:test": []string{"user1", "user2"}},
		TagOwners: TagOwners{"tag:test": []string{"user3", "group:test"}},
		ACLs: []ACL{
-			{Action: "accept", Users: []string{"tag:test"}, Ports: []string{"*:*"}},
+			{
+				Action:       "accept",
+				Sources:      []string{"tag:test"},
+				Destinations: []string{"*:*"},
+			},
		},
	}
	err = app.UpdateACLRules()

@@ -143,12 +159,12 @@ func (s *Suite) TestValidExpandTagOwnersInUsers(c *check.C) {

// this test should validate that we can expand a group in a TagOWner section and
// match properly the IP's of the related hosts. The owner is valid and the tag is also valid.
-// the tag is matched in the Ports section.
-func (s *Suite) TestValidExpandTagOwnersInPorts(c *check.C) {
+// the tag is matched in the Destinations section.
+func (s *Suite) TestValidExpandTagOwnersInDestinations(c *check.C) {
	namespace, err := app.CreateNamespace("user1")
	c.Assert(err, check.IsNil)

-	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
+	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("user1", "testmachine")

@@ -164,8 +180,8 @@ func (s *Suite) TestValidExpandTagOwnersInPorts(c *check.C) {
		MachineKey: "12345",
		NodeKey:    "bar",
		DiscoKey:   "faa",
-		Name:        "testmachine",
-		IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
+		Hostname:    "testmachine",
+		IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),

@@ -177,7 +193,11 @@ func (s *Suite) TestValidExpandTagOwnersInPorts(c *check.C) {
		Groups:    Groups{"group:test": []string{"user1", "user2"}},
		TagOwners: TagOwners{"tag:test": []string{"user3", "group:test"}},
		ACLs: []ACL{
-			{Action: "accept", Users: []string{"*"}, Ports: []string{"tag:test:*"}},
+			{
+				Action:       "accept",
+				Sources:      []string{"*"},
+				Destinations: []string{"tag:test:*"},
+			},
		},
	}
	err = app.UpdateACLRules()

@@ -194,7 +214,7 @@ func (s *Suite) TestInvalidTagValidNamespace(c *check.C) {
	namespace, err := app.CreateNamespace("user1")
	c.Assert(err, check.IsNil)

-	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
+	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("user1", "testmachine")

@@ -210,8 +230,8 @@ func (s *Suite) TestInvalidTagValidNamespace(c *check.C) {
		MachineKey: "12345",
		NodeKey:    "bar",
		DiscoKey:   "faa",
-		Name:        "testmachine",
-		IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
+		Hostname:    "testmachine",
+		IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),

@@ -222,7 +242,11 @@ func (s *Suite) TestInvalidTagValidNamespace(c *check.C) {
	app.aclPolicy = &ACLPolicy{
		TagOwners: TagOwners{"tag:test": []string{"user1"}},
		ACLs: []ACL{
-			{Action: "accept", Users: []string{"user1"}, Ports: []string{"*:*"}},
+			{
+				Action:       "accept",
+				Sources:      []string{"user1"},
+				Destinations: []string{"*:*"},
+			},
		},
	}
	err = app.UpdateACLRules()

@@ -239,7 +263,7 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
	namespace, err := app.CreateNamespace("user1")
	c.Assert(err, check.IsNil)

-	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
+	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("user1", "webserver")

@@ -255,8 +279,8 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
		MachineKey: "12345",
		NodeKey:    "bar",
		DiscoKey:   "faa",
-		Name:        "webserver",
-		IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
+		Hostname:    "webserver",
+		IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),

@@ -274,8 +298,8 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
		MachineKey: "56789",
		NodeKey:    "bar2",
		DiscoKey:   "faab",
-		Name:        "user",
-		IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.2")},
+		Hostname:    "user",
+		IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.2")},
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),

@@ -287,9 +311,9 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
		TagOwners: TagOwners{"tag:webapp": []string{"user1"}},
		ACLs: []ACL{
			{
-				Action: "accept",
-				Users:  []string{"user1"},
-				Ports:  []string{"tag:webapp:80,443"},
+				Action:       "accept",
+				Sources:      []string{"user1"},
+				Destinations: []string{"tag:webapp:80,443"},
			},
		},
	}

@@ -321,6 +345,20 @@ func (s *Suite) TestPortRange(c *check.C) {
	c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
}

+func (s *Suite) TestProtocolParsing(c *check.C) {
+	err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_protocols.hujson")
+	c.Assert(err, check.IsNil)
+
+	rules, err := app.generateACLRules()
+	c.Assert(err, check.IsNil)
+	c.Assert(rules, check.NotNil)
+
+	c.Assert(rules, check.HasLen, 3)
+	c.Assert(rules[0].IPProto[0], check.Equals, protocolTCP)
+	c.Assert(rules[1].IPProto[0], check.Equals, protocolUDP)
+	c.Assert(rules[2].IPProto[1], check.Equals, protocolIPv6ICMP)
+}

func (s *Suite) TestPortWildcard(c *check.C) {
	err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
	c.Assert(err, check.IsNil)

@@ -357,7 +395,7 @@ func (s *Suite) TestPortNamespace(c *check.C) {
	namespace, err := app.CreateNamespace("testnamespace")
	c.Assert(err, check.IsNil)

-	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
+	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("testnamespace", "testmachine")

@@ -368,7 +406,7 @@ func (s *Suite) TestPortNamespace(c *check.C) {
		MachineKey: "12345",
		NodeKey:    "bar",
		DiscoKey:   "faa",
-		Name:           "testmachine",
+		Hostname:       "testmachine",
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		IPAddresses:    ips,

@@ -399,7 +437,7 @@ func (s *Suite) TestPortGroup(c *check.C) {
	namespace, err := app.CreateNamespace("testnamespace")
	c.Assert(err, check.IsNil)

-	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
+	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("testnamespace", "testmachine")

@@ -410,7 +448,7 @@ func (s *Suite) TestPortGroup(c *check.C) {
		MachineKey: "foo",
		NodeKey:    "bar",
		DiscoKey:   "faa",
-		Name:           "testmachine",
+		Hostname:       "testmachine",
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		IPAddresses:    ips,

@@ -628,7 +666,8 @@ func Test_expandTagOwners(t *testing.T) {

func Test_expandPorts(t *testing.T) {
	type args struct {
-		portsStr string
+		portsStr      string
+		needsWildcard bool
	}
	tests := []struct {
		name string

@@ -638,15 +677,29 @@ func Test_expandPorts(t *testing.T) {
	}{
		{
			name: "wildcard",
-			args: args{portsStr: "*"},
+			args: args{portsStr: "*", needsWildcard: true},
			want: &[]tailcfg.PortRange{
				{First: portRangeBegin, Last: portRangeEnd},
			},
			wantErr: false,
		},
		{
-			name: "two ports",
-			args: args{portsStr: "80,443"},
+			name: "needs wildcard but does not require it",
+			args: args{portsStr: "*", needsWildcard: false},
+			want: &[]tailcfg.PortRange{
+				{First: portRangeBegin, Last: portRangeEnd},
+			},
+			wantErr: false,
+		},
+		{
+			name:    "needs wildcard but gets port",
+			args:    args{portsStr: "80,443", needsWildcard: true},
+			want:    nil,
+			wantErr: true,
+		},
+		{
+			name: "two Destinations",
+			args: args{portsStr: "80,443", needsWildcard: false},
			want: &[]tailcfg.PortRange{
				{First: 80, Last: 80},
				{First: 443, Last: 443},

@@ -655,7 +708,7 @@ func Test_expandPorts(t *testing.T) {
		},
		{
			name: "a range and a port",
-			args: args{portsStr: "80-1024,443"},
+			args: args{portsStr: "80-1024,443", needsWildcard: false},
			want: &[]tailcfg.PortRange{
				{First: 80, Last: 1024},
				{First: 443, Last: 443},

@@ -664,38 +717,38 @@ func Test_expandPorts(t *testing.T) {
		},
		{
			name:    "out of bounds",
-			args:    args{portsStr: "854038"},
+			args:    args{portsStr: "854038", needsWildcard: false},
			want:    nil,
			wantErr: true,
		},
		{
			name:    "wrong port",
-			args:    args{portsStr: "85a38"},
+			args:    args{portsStr: "85a38", needsWildcard: false},
			want:    nil,
			wantErr: true,
		},
		{
			name:    "wrong port in first",
-			args:    args{portsStr: "a-80"},
+			args:    args{portsStr: "a-80", needsWildcard: false},
			want:    nil,
			wantErr: true,
		},
		{
			name:    "wrong port in last",
-			args:    args{portsStr: "80-85a38"},
+			args:    args{portsStr: "80-85a38", needsWildcard: false},
			want:    nil,
			wantErr: true,
		},
		{
			name:    "wrong port format",
-			args:    args{portsStr: "80-85a38-3"},
+			args:    args{portsStr: "80-85a38-3", needsWildcard: false},
			want:    nil,
			wantErr: true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
-			got, err := expandPorts(test.args.portsStr)
+			got, err := expandPorts(test.args.portsStr, test.args.needsWildcard)
			if (err != nil) != test.wantErr {
				t.Errorf("expandPorts() error = %v, wantErr %v", err, test.wantErr)

@@ -772,7 +825,6 @@ func Test_listMachinesInNamespace(t *testing.T) {
	}
}

-// nolint
func Test_expandAlias(t *testing.T) {
	type args struct {
		machines []Machine

@@ -791,10 +843,10 @@ func Test_expandAlias(t *testing.T) {
			args: args{
				alias: "*",
				machines: []Machine{
-					{IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")}},
+					{IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")}},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.78.84.227"),
+							netip.MustParseAddr("100.78.84.227"),
						},
					},
				},

@@ -811,25 +863,25 @@ func Test_expandAlias(t *testing.T) {
				machines: []Machine{
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.1"),
+							netip.MustParseAddr("100.64.0.1"),
						},
						Namespace: Namespace{Name: "joe"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.2"),
+							netip.MustParseAddr("100.64.0.2"),
						},
						Namespace: Namespace{Name: "joe"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.3"),
+							netip.MustParseAddr("100.64.0.3"),
						},
						Namespace: Namespace{Name: "marc"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.4"),
+							netip.MustParseAddr("100.64.0.4"),
						},
						Namespace: Namespace{Name: "mickael"},
					},

@@ -849,25 +901,25 @@ func Test_expandAlias(t *testing.T) {
				machines: []Machine{
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.1"),
+							netip.MustParseAddr("100.64.0.1"),
						},
						Namespace: Namespace{Name: "joe"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.2"),
+							netip.MustParseAddr("100.64.0.2"),
						},
						Namespace: Namespace{Name: "joe"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.3"),
+							netip.MustParseAddr("100.64.0.3"),
						},
						Namespace: Namespace{Name: "marc"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.4"),
+							netip.MustParseAddr("100.64.0.4"),
						},
						Namespace: Namespace{Name: "mickael"},
					},

@@ -898,7 +950,7 @@ func Test_expandAlias(t *testing.T) {
				machines: []Machine{},
				aclPolicy: ACLPolicy{
					Hosts: Hosts{
-						"homeNetwork": netaddr.MustParseIPPrefix("192.168.1.0/24"),
+						"homeNetwork": netip.MustParsePrefix("192.168.1.0/24"),
					},
				},
				stripEmailDomain: true,

@@ -935,7 +987,7 @@ func Test_expandAlias(t *testing.T) {
				machines: []Machine{
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.1"),
+							netip.MustParseAddr("100.64.0.1"),
						},
						Namespace: Namespace{Name: "joe"},
						HostInfo: HostInfo{

@@ -946,7 +998,7 @@ func Test_expandAlias(t *testing.T) {
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.2"),
+							netip.MustParseAddr("100.64.0.2"),
						},
						Namespace: Namespace{Name: "joe"},
						HostInfo: HostInfo{

@@ -957,13 +1009,13 @@ func Test_expandAlias(t *testing.T) {
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.3"),
+							netip.MustParseAddr("100.64.0.3"),
						},
						Namespace: Namespace{Name: "marc"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.4"),
+							netip.MustParseAddr("100.64.0.4"),
						},
						Namespace: Namespace{Name: "joe"},
					},

@@ -983,25 +1035,25 @@ func Test_expandAlias(t *testing.T) {
				machines: []Machine{
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.1"),
+							netip.MustParseAddr("100.64.0.1"),
						},
						Namespace: Namespace{Name: "joe"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.2"),
+							netip.MustParseAddr("100.64.0.2"),
						},
						Namespace: Namespace{Name: "joe"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.3"),
+							netip.MustParseAddr("100.64.0.3"),
						},
						Namespace: Namespace{Name: "marc"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.4"),
+							netip.MustParseAddr("100.64.0.4"),
						},
						Namespace: Namespace{Name: "mickael"},
					},

@@ -1017,6 +1069,90 @@ func Test_expandAlias(t *testing.T) {
			want:    []string{},
			wantErr: true,
		},
+		{
+			name: "Forced tag defined",
+			args: args{
+				alias: "tag:hr-webserver",
+				machines: []Machine{
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.1"),
+						},
+						Namespace:  Namespace{Name: "joe"},
+						ForcedTags: []string{"tag:hr-webserver"},
+					},
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.2"),
+						},
+						Namespace:  Namespace{Name: "joe"},
+						ForcedTags: []string{"tag:hr-webserver"},
+					},
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.3"),
+						},
+						Namespace: Namespace{Name: "marc"},
+					},
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.4"),
+						},
+						Namespace: Namespace{Name: "mickael"},
+					},
+				},
+				aclPolicy:        ACLPolicy{},
+				stripEmailDomain: true,
+			},
+			want:    []string{"100.64.0.1", "100.64.0.2"},
+			wantErr: false,
+		},
+		{
+			name: "Forced tag with legitimate tagOwner",
+			args: args{
+				alias: "tag:hr-webserver",
+				machines: []Machine{
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.1"),
+						},
+						Namespace:  Namespace{Name: "joe"},
+						ForcedTags: []string{"tag:hr-webserver"},
+					},
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.2"),
+						},
+						Namespace: Namespace{Name: "joe"},
+						HostInfo: HostInfo{
+							OS:          "centos",
+							Hostname:    "foo",
+							RequestTags: []string{"tag:hr-webserver"},
+						},
+					},
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.3"),
+						},
+						Namespace: Namespace{Name: "marc"},
+					},
+					{
+						IPAddresses: MachineAddresses{
+							netip.MustParseAddr("100.64.0.4"),
+						},
+						Namespace: Namespace{Name: "mickael"},
+					},
+				},
+				aclPolicy: ACLPolicy{
+					TagOwners: TagOwners{
+						"tag:hr-webserver": []string{"joe"},
+					},
+				},
+				stripEmailDomain: true,
+			},
+			want:    []string{"100.64.0.1", "100.64.0.2"},
+			wantErr: false,
+		},
		{
			name: "list host in namespace without correctly tagged servers",
			args: args{

@@ -1024,7 +1160,7 @@ func Test_expandAlias(t *testing.T) {
				machines: []Machine{
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.1"),
+							netip.MustParseAddr("100.64.0.1"),
						},
						Namespace: Namespace{Name: "joe"},
						HostInfo: HostInfo{

@@ -1035,7 +1171,7 @@ func Test_expandAlias(t *testing.T) {
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.2"),
+							netip.MustParseAddr("100.64.0.2"),
						},
						Namespace: Namespace{Name: "joe"},
						HostInfo: HostInfo{

@@ -1046,13 +1182,13 @@ func Test_expandAlias(t *testing.T) {
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.3"),
+							netip.MustParseAddr("100.64.0.3"),
						},
						Namespace: Namespace{Name: "marc"},
					},
					{
						IPAddresses: MachineAddresses{
-							netaddr.MustParseIP("100.64.0.4"),
+							netip.MustParseAddr("100.64.0.4"),
						},
						Namespace: Namespace{Name: "joe"},
					},

@@ -1088,9 +1224,10 @@ func Test_expandAlias(t *testing.T) {

func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
	type args struct {
		aclPolicy ACLPolicy
		nodes     []Machine
|
||||
namespace string
|
||||
aclPolicy ACLPolicy
|
||||
nodes []Machine
|
||||
namespace string
|
||||
stripEmailDomain bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1107,7 +1244,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1118,7 +1255,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1129,16 +1266,110 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
namespace: "joe",
|
||||
namespace: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exclude nodes with valid tags, and owner is in a group",
|
||||
args: args{
|
||||
aclPolicy: ACLPolicy{
|
||||
Groups: Groups{
|
||||
"group:accountant": []string{"joe", "bar"},
|
||||
},
|
||||
TagOwners: TagOwners{
|
||||
"tag:accountant-webserver": []string{"group:accountant"},
|
||||
},
|
||||
},
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
namespace: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exclude nodes with valid tags and with forced tags",
|
||||
args: args{
|
||||
aclPolicy: ACLPolicy{
|
||||
TagOwners: TagOwners{"tag:accountant-webserver": []string{"joe"}},
|
||||
},
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
ForcedTags: []string{"tag:accountant-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
namespace: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1152,7 +1383,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1163,7 +1394,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1174,17 +1405,18 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
namespace: "joe",
|
||||
namespace: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1195,7 +1427,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1206,7 +1438,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1219,6 +1451,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
test.args.aclPolicy,
|
||||
test.args.nodes,
|
||||
test.args.namespace,
|
||||
test.args.stripEmailDomain,
|
||||
)
|
||||
if !reflect.DeepEqual(got, test.want) {
|
||||
t.Errorf("excludeCorrectlyTaggedNodes() = %v, want %v", got, test.want)
|
||||
|
||||
@@ -2,46 +2,55 @@ package headscale
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"gopkg.in/yaml.v3"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
// ACLPolicy represents a Tailscale ACL Policy.
|
||||
type ACLPolicy struct {
|
||||
Groups Groups `json:"Groups" yaml:"Groups"`
|
||||
Hosts Hosts `json:"Hosts" yaml:"Hosts"`
|
||||
TagOwners TagOwners `json:"TagOwners" yaml:"TagOwners"`
|
||||
ACLs []ACL `json:"ACLs" yaml:"ACLs"`
|
||||
Tests []ACLTest `json:"Tests" yaml:"Tests"`
|
||||
Groups Groups `json:"groups" yaml:"groups"`
|
||||
Hosts Hosts `json:"hosts" yaml:"hosts"`
|
||||
TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"`
|
||||
ACLs []ACL `json:"acls" yaml:"acls"`
|
||||
Tests []ACLTest `json:"tests" yaml:"tests"`
|
||||
AutoApprovers AutoApprovers `json:"autoApprovers" yaml:"autoApprovers"`
|
||||
}
|
||||
|
||||
// ACL is a basic rule for the ACL Policy.
|
||||
type ACL struct {
|
||||
Action string `json:"Action" yaml:"Action"`
|
||||
Users []string `json:"Users" yaml:"Users"`
|
||||
Ports []string `json:"Ports" yaml:"Ports"`
|
||||
Action string `json:"action" yaml:"action"`
|
||||
Protocol string `json:"proto" yaml:"proto"`
|
||||
Sources []string `json:"src" yaml:"src"`
|
||||
Destinations []string `json:"dst" yaml:"dst"`
|
||||
}
|
||||
|
||||
// Groups references a series of aliases in the ACL rules.
|
||||
type Groups map[string][]string
|
||||
|
||||
// Hosts are aliases for IP addresses or subnets.
|
||||
type Hosts map[string]netaddr.IPPrefix
|
||||
type Hosts map[string]netip.Prefix
|
||||
|
||||
// TagOwners specify what users (namespaces?) are allowed to use certain tags.
|
||||
type TagOwners map[string][]string
|
||||
|
||||
// ACLTest is not implemented, but should be used to check if a certain rule is allowed.
|
||||
type ACLTest struct {
|
||||
User string `json:"User" yaml:"User"`
|
||||
Allow []string `json:"Allow" yaml:"Allow"`
|
||||
Deny []string `json:"Deny,omitempty" yaml:"Deny,omitempty"`
|
||||
Source string `json:"src" yaml:"src"`
|
||||
Accept []string `json:"accept" yaml:"accept"`
|
||||
Deny []string `json:"deny,omitempty" yaml:"deny,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects.
|
||||
// AutoApprovers specify which users (namespaces?), groups or tags have their advertised routes
|
||||
// or exit node status automatically enabled.
|
||||
type AutoApprovers struct {
|
||||
Routes map[string][]string `json:"routes" yaml:"routes"`
|
||||
ExitNode []string `json:"exitNode" yaml:"exitNode"`
|
||||
}
|
||||
|
||||
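As a reading aid for the retagged structs above, here is a minimal, self-contained Go sketch (not part of the headscale codebase) that decodes a hypothetical policy written with the new lowercase keys; the type definitions only mirror the tags shown in the diff, and every concrete value is invented:

package main

import (
    "encoding/json"
    "fmt"
)

// Minimal mirrors of the policy types above, using the new lowercase tags.
type acl struct {
    Action       string   `json:"action"`
    Protocol     string   `json:"proto"`
    Sources      []string `json:"src"`
    Destinations []string `json:"dst"`
}

type policy struct {
    Groups    map[string][]string `json:"groups"`
    TagOwners map[string][]string `json:"tagOwners"`
    ACLs      []acl               `json:"acls"`
}

func main() {
    // Hypothetical policy document; the keys match the struct tags, the values are examples only.
    raw := `{
      "groups":    {"group:admin": ["joe"]},
      "tagOwners": {"tag:hr-webserver": ["joe"]},
      "acls": [
        {"action": "accept", "proto": "tcp", "src": ["group:admin"], "dst": ["tag:hr-webserver:80"]}
      ]
    }`

    var pol policy
    if err := json.Unmarshal([]byte(raw), &pol); err != nil {
        panic(err)
    }
    fmt.Println(pol.ACLs[0].Sources, "->", pol.ACLs[0].Destinations)
}

Running it prints the parsed sources and destinations, which confirms that src/dst now map onto Sources/Destinations.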
// UnmarshalJSON allows to parse the Hosts directly into netip objects.
|
||||
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
@@ -59,7 +68,7 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
if !strings.Contains(prefixStr, "/") {
|
||||
prefixStr += "/32"
|
||||
}
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
prefix, err := netip.ParsePrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -70,7 +79,7 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML allows to parse the Hosts directly into netaddr objects.
|
||||
// UnmarshalYAML allows to parse the Hosts directly into netip objects.
|
||||
func (hosts *Hosts) UnmarshalYAML(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
@@ -80,7 +89,7 @@ func (hosts *Hosts) UnmarshalYAML(data []byte) error {
|
||||
return err
|
||||
}
|
||||
for host, prefixStr := range hostIPPrefixMap {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
prefix, err := netip.ParsePrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
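The two unmarshal hunks above both normalise bare addresses by appending "/32" before calling netip.ParsePrefix. A small standalone sketch of that behaviour, with invented host names:

package main

import (
    "fmt"
    "net/netip"
    "strings"
)

func main() {
    // Example host entries; bare IPs are normalised to single-address prefixes.
    hosts := map[string]string{
        "homeNetwork": "192.168.1.0/24",
        "exampleHost": "100.64.0.1", // no "/": treated as a /32
    }

    for name, prefixStr := range hosts {
        if !strings.Contains(prefixStr, "/") {
            prefixStr += "/32"
        }
        prefix, err := netip.ParsePrefix(prefixStr)
        if err != nil {
            panic(err)
        }
        fmt.Println(name, "->", prefix)
    }
}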
@@ -99,3 +108,28 @@ func (policy ACLPolicy) IsZero() bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns the list of autoApproving namespaces, groups or tags for a given IPPrefix.
|
||||
func (autoApprovers *AutoApprovers) GetRouteApprovers(
|
||||
prefix netip.Prefix,
|
||||
) ([]string, error) {
|
||||
if prefix.Bits() == 0 {
|
||||
return autoApprovers.ExitNode, nil // 0.0.0.0/0, ::/0 or equivalent
|
||||
}
|
||||
|
||||
approverAliases := []string{}
|
||||
|
||||
for autoApprovedPrefix, autoApproverAliases := range autoApprovers.Routes {
|
||||
autoApprovedPrefix, err := netip.ParsePrefix(autoApprovedPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if prefix.Bits() >= autoApprovedPrefix.Bits() &&
|
||||
autoApprovedPrefix.Contains(prefix.Masked().Addr()) {
|
||||
approverAliases = append(approverAliases, autoApproverAliases...)
|
||||
}
|
||||
}
|
||||
|
||||
return approverAliases, nil
|
||||
}
|
||||
|
||||
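To show how GetRouteApprovers is intended to behave, here is a hedged, self-contained sketch that reimplements the matching loop above outside the headscale package; the route map and alias names are examples only:

package main

import (
    "fmt"
    "net/netip"
)

// routeApprovers mirrors AutoApprovers.Routes: prefix string -> aliases allowed to auto-approve it.
var routeApprovers = map[string][]string{
    "10.0.0.0/8": {"group:infra"}, // example entry
}

// approversFor reimplements the matching loop above, for illustration only.
func approversFor(prefix netip.Prefix) ([]string, error) {
    aliases := []string{}
    for prefixStr, candidates := range routeApprovers {
        approved, err := netip.ParsePrefix(prefixStr)
        if err != nil {
            return nil, err
        }
        // An advertised route matches when it is equal to or narrower than an
        // approved prefix that contains it.
        if prefix.Bits() >= approved.Bits() && approved.Contains(prefix.Masked().Addr()) {
            aliases = append(aliases, candidates...)
        }
    }
    return aliases, nil
}

func main() {
    got, err := approversFor(netip.MustParsePrefix("10.1.0.0/16"))
    if err != nil {
        panic(err)
    }
    fmt.Println(got) // [group:infra]
}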
api.go (695)
@@ -2,25 +2,19 @@ package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(juan): remove this once https://github.com/juanfont/headscale/issues/727 is fixed.
|
||||
registrationHoldoff = time.Second * 5
|
||||
reservedResponseHeaderSize = 4
|
||||
RegisterMethodAuthKey = "authkey"
|
||||
RegisterMethodOIDC = "oidc"
|
||||
@@ -30,14 +24,42 @@ const (
|
||||
)
|
||||
)
|
||||
|
||||
// KeyHandler provides the Headscale pub key
|
||||
// Listens in /key.
|
||||
func (h *Headscale) KeyHandler(ctx *gin.Context) {
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"text/plain; charset=utf-8",
|
||||
[]byte(MachinePublicKeyStripPrefix(h.privateKey.Public())),
|
||||
)
|
||||
func (h *Headscale) HealthHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
respond := func(err error) {
|
||||
writer.Header().Set("Content-Type", "application/health+json; charset=utf-8")
|
||||
|
||||
res := struct {
|
||||
Status string `json:"status"`
|
||||
}{
|
||||
Status: "pass",
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
log.Error().Caller().Err(err).Msg("health check failed")
|
||||
res.Status = "fail"
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msg("marshal failed")
|
||||
}
|
||||
_, err = writer.Write(buf)
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msg("write failed")
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.pingDB(req.Context()); err != nil {
|
||||
respond(err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
respond(nil)
|
||||
}
|
||||
|
||||
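A rough sketch of how a monitoring probe might consume HealthHandler; the listen address below is an assumption and not part of this change:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Hypothetical headscale address; adjust to your deployment.
    resp, err := http.Get("http://localhost:8080/health")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var health struct {
        Status string `json:"status"` // "pass" or "fail", as written by HealthHandler
    }
    if err := json.NewDecoder(resp.Body).Decode(&health); err != nil {
        panic(err)
    }
    fmt.Println(resp.StatusCode, health.Status)
}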
type registerWebAPITemplateConfig struct {
|
||||
@@ -62,600 +84,85 @@ var registerWebAPITemplate = template.Must(
|
||||
`))
|
||||
|
||||
// RegisterWebAPI shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) RegisterWebAPI(ctx *gin.Context) {
|
||||
machineKeyStr := ctx.Query("key")
|
||||
if machineKeyStr == "" {
|
||||
ctx.String(http.StatusBadRequest, "Wrong params")
|
||||
// Listens in /register/:nkey.
|
||||
//
|
||||
// This is not part of the Tailscale control API, as we could send whatever URL
|
||||
// in the RegisterResponse.AuthURL field.
|
||||
func (h *Headscale) RegisterWebAPI(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
vars := mux.Vars(req)
|
||||
nodeKeyStr, ok := vars["nkey"]
|
||||
|
||||
if !NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
|
||||
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// We need to make sure we don't open for XSS style injections, if the parameter that
|
||||
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
|
||||
// the template and log an error.
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(NodePublicKeyEnsurePrefix(nodeKeyStr)),
|
||||
)
|
||||
|
||||
if !ok || nodeKeyStr == "" || err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to parse incoming nodekey")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("Wrong params"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var content bytes.Buffer
|
||||
if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
|
||||
Key: machineKeyStr,
|
||||
Key: nodeKeyStr,
|
||||
}); err != nil {
|
||||
log.Error().
|
||||
Str("func", "RegisterWebAPI").
|
||||
Err(err).
|
||||
Msg("Could not render register web API template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render register web API template"),
|
||||
)
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusOK, "text/html; charset=utf-8", content.Bytes())
|
||||
}
|
||||
|
||||
// RegistrationHandler handles the actual registration process of a machine
|
||||
// Endpoint /machine/:id.
|
||||
func (h *Headscale) RegistrationHandler(ctx *gin.Context) {
|
||||
body, _ := io.ReadAll(ctx.Request.Body)
|
||||
machineKeyStr := ctx.Param("id")
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText([]byte(MachinePublicKeyEnsurePrefix(machineKeyStr)))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot parse machine key")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Sad!")
|
||||
|
||||
return
|
||||
}
|
||||
req := tailcfg.RegisterRequest{}
|
||||
err = decode(body, &req, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot decode message")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Very sad!")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
machine, err := h.GetMachineByMachineKey(machineKey)
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Info().Str("machine", req.Hostinfo.Hostname).Msg("New machine")
|
||||
|
||||
machineKeyStr := MachinePublicKeyStripPrefix(machineKey)
|
||||
|
||||
// If the machine has AuthKey set, handle registration via PreAuthKeys
|
||||
if req.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(ctx, machineKey, req)
|
||||
|
||||
return
|
||||
}
|
||||
hname, err := NormalizeToFQDNRules(
|
||||
req.Hostinfo.Hostname,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", req.Hostinfo.Hostname).
|
||||
Err(err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine did not have a key to authenticate, which means
|
||||
// that we rely on a method that calls back some how (OpenID or CLI)
|
||||
// We create the machine and then keep it around until a callback
|
||||
// happens
|
||||
newMachine := Machine{
|
||||
MachineKey: machineKeyStr,
|
||||
Name: hname,
|
||||
NodeKey: NodePublicKeyStripPrefix(req.NodeKey),
|
||||
LastSeen: &now,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
|
||||
if !req.Expiry.IsZero() {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Time("expiry", req.Expiry).
|
||||
Msg("Non-zero expiry time requested")
|
||||
newMachine.Expiry = &req.Expiry
|
||||
}
|
||||
|
||||
h.registrationCache.Set(
|
||||
machineKeyStr,
|
||||
newMachine,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
h.handleMachineRegistrationNew(ctx, machineKey, req)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine is already registered, so we need to pass through reauth or key update.
|
||||
if machine != nil {
|
||||
// If the NodeKey stored in headscale is the same as the key presented in a registration
|
||||
// request, then we have a node that is either:
|
||||
// - Trying to log out (sending a expiry in the past)
|
||||
// - A valid, registered machine, looking for the node map
|
||||
// - Expired machine wanting to reauthenticate
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(req.NodeKey) {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !req.Expiry.IsZero() && req.Expiry.UTC().Before(now) {
|
||||
h.handleMachineLogOut(ctx, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// If the machine is not expired and is registered, we have already accepted this machine,
|
||||
// let it proceed with a valid registration
|
||||
if !machine.isExpired() {
|
||||
h.handleMachineValidRegistration(ctx, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(req.OldNodeKey) &&
|
||||
!machine.isExpired() {
|
||||
h.handleMachineRefreshKey(ctx, machineKey, req, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine has expired
|
||||
h.handleMachineExpired(ctx, machineKey, req, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapResponse(
|
||||
machineKey key.MachinePublic,
|
||||
req tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
) ([]byte, error) {
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot convert to node")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peers, err := h.getValidPeers(machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot fetch peers")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profiles := getMapResponseUserProfiles(*machine, peers)
|
||||
|
||||
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to convert peers to Tailscale nodes")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
h.cfg.DNSConfig,
|
||||
h.cfg.BaseDomain,
|
||||
*machine,
|
||||
peers,
|
||||
)
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: nodePeers,
|
||||
DNSConfig: dnsConfig,
|
||||
Domain: h.cfg.BaseDomain,
|
||||
PacketFilter: h.aclRules,
|
||||
DERPMap: h.DERPMap,
|
||||
UserProfiles: profiles,
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
var respBody []byte
|
||||
if req.Compress == "zstd" {
|
||||
src, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to marshal response for the client")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
} else {
|
||||
respBody, err = encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// declare the incoming size on the first 4 bytes
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
data = append(data, respBody...)
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
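getMapResponse frames its payload with a 4-byte little-endian length prefix. The following minimal sketch shows how such a frame could be read back; it is illustrative only, since the real consumer is the Tailscale client rather than code in this repository:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

// readFramed reads one length-prefixed message: a uint32 little-endian size
// followed by that many payload bytes, matching the framing written above.
func readFramed(r io.Reader) ([]byte, error) {
    header := make([]byte, 4)
    if _, err := io.ReadFull(r, header); err != nil {
        return nil, err
    }
    size := binary.LittleEndian.Uint32(header)
    payload := make([]byte, size)
    if _, err := io.ReadFull(r, payload); err != nil {
        return nil, err
    }
    return payload, nil
}

func main() {
    // Build a frame the same way getMapResponse does, then read it back.
    body := []byte(`{"KeepAlive":true}`)
    frame := make([]byte, 4)
    binary.LittleEndian.PutUint32(frame, uint32(len(body)))
    frame = append(frame, body...)

    got, err := readFramed(bytes.NewReader(frame))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(got))
}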
func (h *Headscale) getMapKeepAliveResponse(
|
||||
machineKey key.MachinePublic,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
) ([]byte, error) {
|
||||
mapResponse := tailcfg.MapResponse{
|
||||
KeepAlive: true,
|
||||
}
|
||||
var respBody []byte
|
||||
var err error
|
||||
if mapRequest.Compress == "zstd" {
|
||||
src, err := json.Marshal(mapResponse)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapKeepAliveResponse").
|
||||
Err(err).
|
||||
Msg("Failed to marshal keepalive response for the client")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
} else {
|
||||
respBody, err = encode(mapResponse, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
data = append(data, respBody...)
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineLogOut(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Client requested logout")
|
||||
|
||||
h.ExpireMachine(&machine)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = false
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineValidRegistration(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is valid, respond with redirect to /map
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
resp.Login = *machine.Namespace.toLogin()
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("update", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("update", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineExpired(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The client has registered before, but has expired
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Machine registration has expired. Sending a authurl to register")
|
||||
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(ctx, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), machineKey.String())
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), machineKey.String())
|
||||
}
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRefreshKey(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
h.db.Save(&machine)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRegistrationNew(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is new, redirect the client to the registration URL
|
||||
log.Debug().
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("The node is sending us a new NodeKey, sending auth url")
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf(
|
||||
"%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
machineKey.String(),
|
||||
)
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), MachinePublicKeyStripPrefix(machineKey))
|
||||
}
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
// TODO: check if any locks are needed around IP allocation.
|
||||
func (h *Headscale) handleAuthKey(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
) {
|
||||
machineKeyStr := MachinePublicKeyStripPrefix(machineKey)
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
pak, err := h.checkKeyValidity(registerRequest.Auth.AuthKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
resp.MachineAuthorized = false
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusUnauthorized, "application/json; charset=utf-8", respBody)
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
|
||||
if pak != nil {
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
} else {
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", "unknown").Inc()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Authentication key was valid, proceeding to acquire IP addresses")
|
||||
|
||||
nodeKey := NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
|
||||
// retrieve machine information if it exists
|
||||
// The error is not important, because if it does not
|
||||
// exist, then this is a new machine and we will move
|
||||
// on to registration.
|
||||
machine, _ := h.GetMachineByMachineKey(machineKey)
|
||||
if machine != nil {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", machine.Name).
|
||||
Msg("machine already registered, refreshing with new auth key")
|
||||
|
||||
machine.NodeKey = nodeKey
|
||||
machine.AuthKeyID = uint(pak.ID)
|
||||
h.RefreshMachine(machine, registerRequest.Expiry)
|
||||
} else {
|
||||
now := time.Now().UTC()
|
||||
machineToRegister := Machine{
|
||||
Name: registerRequest.Hostinfo.Hostname,
|
||||
NamespaceID: pak.Namespace.ID,
|
||||
MachineKey: machineKeyStr,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
Expiry: ®isterRequest.Expiry,
|
||||
NodeKey: nodeKey,
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
|
||||
machine, err = h.RegisterMachine(
|
||||
machineToRegister,
|
||||
)
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err = writer.Write([]byte("Could not render register web API template"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("could not register machine")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(
|
||||
http.StatusInternalServerError,
|
||||
"could not register machine",
|
||||
)
|
||||
|
||||
return
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
h.UsePreAuthKey(pak)
|
||||
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *pak.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "success", pak.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Str("ips", strings.Join(machine.IPAddresses.ToStringSlice(), ", ")).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(content.Bytes())
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
api_common.go (new file, 80)
@@ -0,0 +1,80 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func (h *Headscale) generateMapResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
log.Trace().
|
||||
Str("func", "generateMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "generateMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot convert to node")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peers, err := h.getValidPeers(machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "generateMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot fetch peers")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profiles := getMapResponseUserProfiles(*machine, peers)
|
||||
|
||||
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "generateMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to convert peers to Tailscale nodes")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
h.cfg.DNSConfig,
|
||||
h.cfg.BaseDomain,
|
||||
*machine,
|
||||
peers,
|
||||
)
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: nodePeers,
|
||||
DNSConfig: dnsConfig,
|
||||
Domain: h.cfg.BaseDomain,
|
||||
PacketFilter: h.aclRules,
|
||||
DERPMap: h.DERPMap,
|
||||
UserProfiles: profiles,
|
||||
Debug: &tailcfg.Debug{
|
||||
DisableLogTail: !h.cfg.LogTail.Enabled,
|
||||
RandomizeClientPort: h.cfg.RandomizeClientPort,
|
||||
},
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("func", "generateMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
api_key.go (23)
@@ -13,9 +13,8 @@ import (
|
||||
const (
|
||||
apiPrefixLength = 7
|
||||
apiKeyLength = 32
|
||||
apiKeyParts = 2
|
||||
|
||||
errAPIKeyFailedToParse = Error("Failed to parse ApiKey")
|
||||
ErrAPIKeyFailedToParse = Error("Failed to parse ApiKey")
|
||||
)
|
||||
|
||||
// APIKey describes the datamodel for API keys used to remotely authenticate with
|
||||
@@ -57,7 +56,10 @@ func (h *Headscale) CreateAPIKey(
|
||||
Hash: hash,
|
||||
Expiration: expiration,
|
||||
}
|
||||
h.db.Save(&key)
|
||||
|
||||
if err := h.db.Save(&key).Error; err != nil {
|
||||
return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
|
||||
}
|
||||
|
||||
return keyStr, &key, nil
|
||||
}
|
||||
@@ -112,9 +114,9 @@ func (h *Headscale) ExpireAPIKey(key *APIKey) error {
|
||||
}
|
||||
|
||||
func (h *Headscale) ValidateAPIKey(keyStr string) (bool, error) {
|
||||
prefix, hash, err := splitAPIKey(keyStr)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to validate api key: %w", err)
|
||||
prefix, hash, found := strings.Cut(keyStr, ".")
|
||||
if !found {
|
||||
return false, ErrAPIKeyFailedToParse
|
||||
}
|
||||
|
||||
key, err := h.GetAPIKey(prefix)
|
||||
@@ -133,15 +135,6 @@ func (h *Headscale) ValidateAPIKey(keyStr string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func splitAPIKey(key string) (string, string, error) {
|
||||
parts := strings.Split(key, ".")
|
||||
if len(parts) != apiKeyParts {
|
||||
return "", "", errAPIKeyFailedToParse
|
||||
}
|
||||
|
||||
return parts[0], parts[1], nil
|
||||
}
|
||||
|
||||
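The removed splitAPIKey helper is replaced by strings.Cut in ValidateAPIKey above. A quick illustrative check of that split, with a fabricated key string:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // An API key is expected to look like "<prefix>.<hash>"; Cut splits on the first dot.
    keyStr := "abc1234.deadbeefdeadbeefdeadbeefdeadbeef" // made-up example value

    prefix, hash, found := strings.Cut(keyStr, ".")
    if !found {
        fmt.Println("failed to parse ApiKey")
        return
    }
    fmt.Println("prefix:", prefix, "hash:", hash)
}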
func (key *APIKey) toProto() *v1.ApiKey {
|
||||
protoKey := v1.ApiKey{
|
||||
Id: key.ID,
|
||||
|
||||
app.go (556)
@@ -6,10 +6,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sort"
|
||||
@@ -19,15 +17,16 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/gin-gonic/gin"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/gorilla/mux"
|
||||
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/patrickmn/go-cache"
|
||||
zerolog "github.com/philip-bui/grpc-zerolog"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/puzpuzpuz/xsync/v2"
|
||||
zl "github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
ginprometheus "github.com/zsais/go-gin-prometheus"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"golang.org/x/oauth2"
|
||||
@@ -41,7 +40,6 @@ import (
|
||||
"google.golang.org/grpc/reflection"
|
||||
"google.golang.org/grpc/status"
|
||||
"gorm.io/gorm"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
@@ -56,12 +54,13 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
AuthPrefix = "Bearer "
|
||||
Postgres = "postgres"
|
||||
Sqlite = "sqlite3"
|
||||
updateInterval = 5000
|
||||
HTTPReadTimeout = 30 * time.Second
|
||||
privateKeyFileMode = 0o600
|
||||
AuthPrefix = "Bearer "
|
||||
Postgres = "postgres"
|
||||
Sqlite = "sqlite3"
|
||||
updateInterval = 5000
|
||||
HTTPReadTimeout = 30 * time.Second
|
||||
HTTPShutdownTimeout = 3 * time.Second
|
||||
privateKeyFileMode = 0o600
|
||||
|
||||
registerCacheExpiration = time.Minute * 15
|
||||
registerCacheCleanup = time.Minute * 20
|
||||
@@ -71,84 +70,17 @@ const (
|
||||
EnforcedClientAuth = "enforced"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration.
|
||||
type Config struct {
|
||||
ServerURL string
|
||||
Addr string
|
||||
MetricsAddr string
|
||||
GRPCAddr string
|
||||
GRPCAllowInsecure bool
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
IPPrefixes []netaddr.IPPrefix
|
||||
PrivateKeyPath string
|
||||
BaseDomain string
|
||||
|
||||
DERP DERPConfig
|
||||
|
||||
DBtype string
|
||||
DBpath string
|
||||
DBhost string
|
||||
DBport int
|
||||
DBname string
|
||||
DBuser string
|
||||
DBpass string
|
||||
|
||||
TLSLetsEncryptListen string
|
||||
TLSLetsEncryptHostname string
|
||||
TLSLetsEncryptCacheDir string
|
||||
TLSLetsEncryptChallengeType string
|
||||
|
||||
TLSCertPath string
|
||||
TLSKeyPath string
|
||||
TLSClientAuthMode tls.ClientAuthType
|
||||
|
||||
ACMEURL string
|
||||
ACMEEmail string
|
||||
|
||||
DNSConfig *tailcfg.DNSConfig
|
||||
|
||||
UnixSocket string
|
||||
UnixSocketPermission fs.FileMode
|
||||
|
||||
OIDC OIDCConfig
|
||||
|
||||
CLI CLIConfig
|
||||
}
|
||||
|
||||
type OIDCConfig struct {
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
StripEmaildomain bool
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
ServerEnabled bool
|
||||
ServerRegionID int
|
||||
ServerRegionCode string
|
||||
ServerRegionName string
|
||||
STUNAddr string
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
Address string
|
||||
APIKey string
|
||||
Timeout time.Duration
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
// Headscale represents the base app of the service.
|
||||
type Headscale struct {
|
||||
cfg Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
privateKey *key.MachinePrivate
|
||||
cfg *Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
privateKey *key.MachinePrivate
|
||||
noisePrivateKey *key.MachinePrivate
|
||||
|
||||
noiseMux *mux.Router
|
||||
|
||||
DERPMap *tailcfg.DERPMap
|
||||
DERPServer *DERPServer
|
||||
@@ -156,7 +88,7 @@ type Headscale struct {
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules []tailcfg.FilterRule
|
||||
|
||||
lastStateChange sync.Map
|
||||
lastStateChange *xsync.MapOf[string, time.Time]
|
||||
|
||||
oidcProvider *oidc.Provider
|
||||
oauth2Config *oauth2.Config
|
||||
@@ -164,6 +96,9 @@ type Headscale struct {
|
||||
registrationCache *cache.Cache
|
||||
|
||||
ipAllocationMutex sync.Mutex
|
||||
|
||||
shutdownChan chan struct{}
|
||||
pollNetMapStreamWG sync.WaitGroup
|
||||
}
|
||||
|
||||
// Look up the TLS constant relative to user-supplied TLS client
|
||||
@@ -187,23 +122,43 @@ func LookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
|
||||
}
|
||||
}
|
||||
|
||||
func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
privKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
func NewHeadscale(cfg *Config) (*Headscale, error) {
|
||||
privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create private key: %w", err)
|
||||
}
|
||||
|
||||
// TS2021 requires a different key from the legacy protocol.
|
||||
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
|
||||
}
|
||||
|
||||
if privateKey.Equal(*noisePrivateKey) {
|
||||
return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
|
||||
}
|
||||
|
||||
var dbString string
|
||||
switch cfg.DBtype {
|
||||
case Postgres:
|
||||
dbString = fmt.Sprintf(
|
||||
"host=%s port=%d dbname=%s user=%s password=%s sslmode=disable",
|
||||
"host=%s dbname=%s user=%s",
|
||||
cfg.DBhost,
|
||||
cfg.DBport,
|
||||
cfg.DBname,
|
||||
cfg.DBuser,
|
||||
cfg.DBpass,
|
||||
)
|
||||
|
||||
if !cfg.DBssl {
|
||||
dbString += " sslmode=disable"
|
||||
}
|
||||
|
||||
if cfg.DBport != 0 {
|
||||
dbString += fmt.Sprintf(" port=%d", cfg.DBport)
|
||||
}
|
||||
|
||||
if cfg.DBpass != "" {
|
||||
dbString += fmt.Sprintf(" password=%s", cfg.DBpass)
|
||||
}
|
||||
case Sqlite:
|
||||
dbString = cfg.DBpath
|
||||
default:
|
||||
@@ -216,12 +171,14 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
)
|
||||
|
||||
app := Headscale{
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
aclRules: tailcfg.FilterAllowAll, // default allowall
|
||||
registrationCache: registrationCache,
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privateKey,
|
||||
noisePrivateKey: noisePrivateKey,
|
||||
aclRules: tailcfg.FilterAllowAll, // default allowall
|
||||
registrationCache: registrationCache,
|
||||
pollNetMapStreamWG: sync.WaitGroup{},
|
||||
}
|
||||
|
||||
err = app.initDB()
|
||||
@@ -232,7 +189,11 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
if cfg.OIDC.Issuer != "" {
|
||||
err = app.initOIDC()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
|
||||
return nil, err
|
||||
} else {
|
||||
log.Warn().Err(err).Msg("failed to set up OIDC provider, falling back to CLI based authentication")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,7 +201,7 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
magicDNSDomains := generateMagicDNSRootDomains(app.cfg.IPPrefixes)
|
||||
// we might have routes already from Split DNS
|
||||
if app.cfg.DNSConfig.Routes == nil {
|
||||
app.cfg.DNSConfig.Routes = make(map[string][]dnstype.Resolver)
|
||||
app.cfg.DNSConfig.Routes = make(map[string][]*dnstype.Resolver)
|
||||
}
|
||||
for _, d := range magicDNSDomains {
|
||||
app.cfg.DNSConfig.Routes[d.WithoutTrailingDot()] = nil
|
||||
@@ -292,33 +253,38 @@ func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
return
|
||||
}
|
||||
|
||||
expiredFound := false
|
||||
for _, machine := range machines {
|
||||
if machine.AuthKey != nil && machine.LastSeen != nil &&
|
||||
machine.AuthKey.Ephemeral &&
|
||||
time.Now().
|
||||
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
expiredFound = true
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Ephemeral client removed from database")
|
||||
|
||||
err = h.db.Unscoped().Delete(machine).Error
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
if expiredFound {
|
||||
h.setLastStateChangeToNow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
req interface{},
|
||||
info *grpc.UnaryServerInfo,
|
||||
handler grpc.UnaryHandler) (interface{}, error) {
|
||||
handler grpc.UnaryHandler,
|
||||
) (interface{}, error) {
|
||||
// Check if the request is coming from the on-server client.
|
||||
// This is not secure, but it is to maintain maintainability
|
||||
// with the "legacy" database-based client
|
||||
@@ -393,48 +359,74 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
return handler(ctx, req)
|
||||
}
|
||||
|
||||
func (h *Headscale) httpAuthenticationMiddleware(ctx *gin.Context) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("HTTP authentication invoked")
|
||||
|
||||
authHeader := ctx.GetHeader("authorization")
|
||||
|
||||
if !strings.HasPrefix(authHeader, AuthPrefix) {
|
||||
log.Error().
|
||||
func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("HTTP authentication invoked")
|
||||
|
||||
return
|
||||
}
|
||||
authHeader := req.Header.Get("authorization")
|
||||
|
||||
valid, err := h.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("failed to validate token")
|
||||
if !strings.HasPrefix(authHeader, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
ctx.AbortWithStatus(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
valid, err := h.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("failed to validate token")
|
||||
|
||||
if !valid {
|
||||
log.Info().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("invalid token")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.Info().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("invalid token")
|
||||
|
||||
ctx.Next()
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(writer, req)
|
||||
})
|
||||
}
|
||||
|
||||
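The rewritten httpAuthenticationMiddleware follows the standard net/http middleware shape: wrap the next handler and short-circuit on failure. A small hedged sketch of that pattern with a stand-in check (none of the names below come from headscale):

package main

import (
    "fmt"
    "net/http"
)

// requireHeader is a stand-in middleware in the same shape as httpAuthenticationMiddleware:
// it wraps the next handler and rejects requests up front when a condition fails.
func requireHeader(next http.Handler) http.Handler {
    return http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
        if req.Header.Get("Authorization") == "" {
            writer.WriteHeader(http.StatusUnauthorized)
            return
        }
        next.ServeHTTP(writer, req)
    })
}

func main() {
    api := http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
        fmt.Fprintln(writer, "ok")
    })
    http.Handle("/api/", requireHeader(api))
    // http.ListenAndServe(":8080", nil) // start as needed
}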
// ensureUnixSocketIsAbsent will check if the given path for headscales unix socket is clear
|
||||
@@ -448,48 +440,49 @@ func (h *Headscale) ensureUnixSocketIsAbsent() error {
return os.Remove(h.cfg.UnixSocket)
}

func (h *Headscale) createPrometheusRouter() *gin.Engine {
promRouter := gin.Default()
func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *mux.Router {
router := mux.NewRouter()

prometheus := ginprometheus.NewPrometheus("gin")
prometheus.Use(promRouter)
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost)

return promRouter
}
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
h.addLegacyHandlers(router)

func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *gin.Engine {
router := gin.Default()

router.GET(
"/health",
func(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"healthy": "ok"}) },
)
router.GET("/key", h.KeyHandler)
router.GET("/register", h.RegisterWebAPI)
router.POST("/machine/:id/map", h.PollNetMapHandler)
router.POST("/machine/:id", h.RegistrationHandler)
router.GET("/oidc/register/:mkey", h.RegisterOIDC)
router.GET("/oidc/callback", h.OIDCCallback)
router.GET("/apple", h.AppleConfigMessage)
router.GET("/apple/:platform", h.ApplePlatformConfig)
router.GET("/windows", h.WindowsConfigMessage)
router.GET("/windows/tailscale.reg", h.WindowsRegConfig)
router.GET("/swagger", SwaggerUI)
router.GET("/swagger/v1/openapiv2.json", SwaggerAPIv1)
router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
Methods(http.MethodGet)
router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig).
Methods(http.MethodGet)
router.HandleFunc("/swagger", SwaggerUI).Methods(http.MethodGet)
router.HandleFunc("/swagger/v1/openapiv2.json", SwaggerAPIv1).
Methods(http.MethodGet)

if h.cfg.DERP.ServerEnabled {
router.Any("/derp", h.DERPHandler)
router.Any("/derp/probe", h.DERPProbeHandler)
router.Any("/bootstrap-dns", h.DERPBootstrapDNSHandler)
router.HandleFunc("/derp", h.DERPHandler)
router.HandleFunc("/derp/probe", h.DERPProbeHandler)
router.HandleFunc("/bootstrap-dns", h.DERPBootstrapDNSHandler)
}

api := router.Group("/api")
api.Use(h.httpAuthenticationMiddleware)
{
api.Any("/v1/*any", gin.WrapF(grpcMux.ServeHTTP))
}
apiRouter := router.PathPrefix("/api").Subrouter()
apiRouter.Use(h.httpAuthenticationMiddleware)
apiRouter.PathPrefix("/v1/").HandlerFunc(grpcMux.ServeHTTP)

router.NoRoute(stdoutHandler)
router.PathPrefix("/").HandlerFunc(stdoutHandler)

return router
}

func (h *Headscale) createNoiseMux() *mux.Router {
router := mux.NewRouter()

router.HandleFunc("/machine/register", h.NoiseRegistrationHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/map", h.NoisePollNetMapHandler)

return router
}
@@ -552,19 +545,6 @@ func (h *Headscale) Serve() error {
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
}

// Handle common process-killing signals so we can gracefully shut down:
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt, syscall.SIGTERM)
go func(c chan os.Signal) {
// Wait for a SIGINT or SIGKILL:
sig := <-c
log.Printf("Caught signal %s: shutting down.", sig)
// Stop listening (and unlink the socket if unix type):
socketListener.Close()
// And we're done:
os.Exit(0)
}(sigc)

grpcGatewayMux := runtime.NewServeMux()

// Make the grpc-gateway connect to grpc over socket
@@ -618,12 +598,14 @@ func (h *Headscale) Serve() error {
// https://github.com/soheilhy/cmux/issues/68
// https://github.com/soheilhy/cmux/issues/91

var grpcServer *grpc.Server
var grpcListener net.Listener
if tlsConfig != nil || h.cfg.GRPCAllowInsecure {
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)

grpcOptions := []grpc.ServerOption{
grpc.UnaryInterceptor(
grpc_middleware.ChainUnaryServer(
grpcMiddleware.ChainUnaryServer(
h.grpcAuthenticationInterceptor,
zerolog.NewUnaryServerInterceptor(),
),
@@ -638,12 +620,12 @@ func (h *Headscale) Serve() error {
log.Warn().Msg("gRPC is running without security")
}

grpcServer := grpc.NewServer(grpcOptions...)
grpcServer = grpc.NewServer(grpcOptions...)

v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))
reflection.Register(grpcServer)

grpcListener, err := net.Listen("tcp", h.cfg.GRPCAddr)
grpcListener, err = net.Listen("tcp", h.cfg.GRPCAddr)
if err != nil {
return fmt.Errorf("failed to bind to TCP address: %w", err)
}
@@ -658,9 +640,16 @@ func (h *Headscale) Serve() error {
//
// HTTP setup
//

// This is the regular router that we expose
// over our main Addr. It also serves the legacy Tailcale API
router := h.createRouter(grpcGatewayMux)

// This router is served only over the Noise connection, and exposes only the new API.
//
// The HTTP2 server that exposes this router is created for
// a single hijacked connection from /ts2021, using netutil.NewOneConnListener
h.noiseMux = h.createNoiseMux()

httpServer := &http.Server{
Addr: h.cfg.Addr,
Handler: router,
@@ -688,11 +677,12 @@ func (h *Headscale) Serve() error {
log.Info().
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)

promRouter := h.createPrometheusRouter()
promMux := http.NewServeMux()
promMux.Handle("/metrics", promhttp.Handler())

promHTTPServer := &http.Server{
Addr: h.cfg.MetricsAddr,
Handler: promRouter,
Handler: promMux,
ReadTimeout: HTTPReadTimeout,
WriteTimeout: 0,
}
@@ -709,12 +699,105 @@ func (h *Headscale) Serve() error {
log.Info().
Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr)

// Handle common process-killing signals so we can gracefully shut down:
h.shutdownChan = make(chan struct{})
sigc := make(chan os.Signal, 1)
signal.Notify(sigc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT,
syscall.SIGHUP)
sigFunc := func(c chan os.Signal) {
// Wait for a SIGINT or SIGKILL:
for {
sig := <-c
switch sig {
case syscall.SIGHUP:
log.Info().
Str("signal", sig.String()).
Msg("Received SIGHUP, reloading ACL and Config")

// TODO(kradalby): Reload config on SIGHUP

if h.cfg.ACL.PolicyPath != "" {
aclPath := AbsolutePathFromConfigPath(h.cfg.ACL.PolicyPath)
err := h.LoadACLPolicy(aclPath)
if err != nil {
log.Error().Err(err).Msg("Failed to reload ACL policy")
}
log.Info().
Str("path", aclPath).
Msg("ACL policy successfully reloaded, notifying nodes of change")

h.setLastStateChangeToNow()
}

default:
log.Info().
Str("signal", sig.String()).
Msg("Received signal to stop, shutting down gracefully")

close(h.shutdownChan)
h.pollNetMapStreamWG.Wait()

// Gracefully shut down servers
ctx, cancel := context.WithTimeout(
context.Background(),
HTTPShutdownTimeout,
)
if err := promHTTPServer.Shutdown(ctx); err != nil {
log.Error().Err(err).Msg("Failed to shutdown prometheus http")
}
if err := httpServer.Shutdown(ctx); err != nil {
log.Error().Err(err).Msg("Failed to shutdown http")
}
grpcSocket.GracefulStop()

if grpcServer != nil {
grpcServer.GracefulStop()
grpcListener.Close()
}

// Close network listeners
promHTTPListener.Close()
httpListener.Close()
grpcGatewayConn.Close()

// Stop listening (and unlink the socket if unix type):
socketListener.Close()

// Close db connections
db, err := h.db.DB()
if err != nil {
log.Error().Err(err).Msg("Failed to get db handle")
}
err = db.Close()
if err != nil {
log.Error().Err(err).Msg("Failed to close db")
}

log.Info().
Msg("Headscale stopped")

// And we're done:
cancel()
os.Exit(0)
}
}
}
errorGroup.Go(func() error {
sigFunc(sigc)

return nil
})

return errorGroup.Wait()
}

func (h *Headscale) getTLSSettings() (*tls.Config, error) {
var err error
if h.cfg.TLSLetsEncryptHostname != "" {
if h.cfg.TLS.LetsEncrypt.Hostname != "" {
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
log.Warn().
Msg("Listening with TLS but ServerURL does not start with https://")
@@ -722,29 +805,37 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {

certManager := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(h.cfg.TLSLetsEncryptHostname),
Cache: autocert.DirCache(h.cfg.TLSLetsEncryptCacheDir),
HostPolicy: autocert.HostWhitelist(h.cfg.TLS.LetsEncrypt.Hostname),
Cache: autocert.DirCache(h.cfg.TLS.LetsEncrypt.CacheDir),
Client: &acme.Client{
DirectoryURL: h.cfg.ACMEURL,
},
Email: h.cfg.ACMEEmail,
}

switch h.cfg.TLSLetsEncryptChallengeType {
case "TLS-ALPN-01":
switch h.cfg.TLS.LetsEncrypt.ChallengeType {
case tlsALPN01ChallengeType:
// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
// The RFC requires that the validation is done on port 443; in other words, headscale
// must be reachable on port 443.
return certManager.TLSConfig(), nil

case "HTTP-01":
case http01ChallengeType:
// Configuration via autocert with HTTP-01. This requires listening on
// port 80 for the certificate validation in addition to the headscale
// service, which can be configured to run on any other port.

server := &http.Server{
Addr: h.cfg.TLS.LetsEncrypt.Listen,
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
ReadTimeout: HTTPReadTimeout,
}

go func() {
err := server.ListenAndServe()
log.Fatal().
Caller().
Err(http.ListenAndServe(h.cfg.TLSLetsEncryptListen, certManager.HTTPHandler(http.HandlerFunc(h.redirect)))).
Err(err).
Msg("failed to set up a HTTP server")
}()

@@ -753,7 +844,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
default:
return nil, errUnsupportedLetsEncryptChallengeType
}
} else if h.cfg.TLSCertPath == "" {
} else if h.cfg.TLS.CertPath == "" {
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
}
@@ -766,36 +857,60 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {

log.Info().Msg(fmt.Sprintf(
"Client authentication (mTLS) is \"%s\". See the docs to learn about configuring this setting.",
h.cfg.TLSClientAuthMode))
h.cfg.TLS.ClientAuthMode))

tlsConfig := &tls.Config{
ClientAuth: h.cfg.TLSClientAuthMode,
ClientAuth: h.cfg.TLS.ClientAuthMode,
NextProtos: []string{"http/1.1"},
Certificates: make([]tls.Certificate, 1),
MinVersion: tls.VersionTLS12,
}

tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(h.cfg.TLS.CertPath, h.cfg.TLS.KeyPath)

return tlsConfig, err
}
}

func (h *Headscale) setLastStateChangeToNow(namespace string) {
now := time.Now().UTC()
lastStateUpdate.WithLabelValues("", "headscale").Set(float64(now.Unix()))
h.lastStateChange.Store(namespace, now)
}
func (h *Headscale) setLastStateChangeToNow() {
var err error

func (h *Headscale) getLastStateChange(namespaces ...string) time.Time {
times := []time.Time{}
now := time.Now().UTC()

namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().
Caller().
Err(err).
Msg("failed to fetch all namespaces, failing to update last changed state.")
}

for _, namespace := range namespaces {
if wrapped, ok := h.lastStateChange.Load(namespace); ok {
lastChange, _ := wrapped.(time.Time)

times = append(times, lastChange)
lastStateUpdate.WithLabelValues(namespace.Name, "headscale").Set(float64(now.Unix()))
if h.lastStateChange == nil {
h.lastStateChange = xsync.NewMapOf[time.Time]()
}
h.lastStateChange.Store(namespace.Name, now)
}
}

func (h *Headscale) getLastStateChange(namespaces ...Namespace) time.Time {
times := []time.Time{}

// getLastStateChange takes a list of namespaces as a "filter", if no namespaces
// are past, then use the entier list of namespaces and look for the last update
if len(namespaces) > 0 {
for _, namespace := range namespaces {
if lastChange, ok := h.lastStateChange.Load(namespace.Name); ok {
times = append(times, lastChange)
}
}
} else {
h.lastStateChange.Range(func(key string, value time.Time) bool {
times = append(times, value)

return true
})
}

sort.Slice(times, func(i, j int) bool {
@@ -811,13 +926,16 @@ func (h *Headscale) getLastStateChange(namespaces ...string) time.Time {
}
}

func stdoutHandler(ctx *gin.Context) {
body, _ := io.ReadAll(ctx.Request.Body)
func stdoutHandler(
writer http.ResponseWriter,
req *http.Request,
) {
body, _ := io.ReadAll(req.Body)

log.Trace().
Interface("header", ctx.Request.Header).
Interface("proto", ctx.Request.Proto).
Interface("url", ctx.Request.URL).
Interface("header", req.Header).
Interface("proto", req.Proto).
Interface("url", req.URL).
Bytes("body", body).
Msg("Request did not match")
}

app_test.go
@@ -1,12 +1,11 @@
package headscale

import (
"io/ioutil"
"net/netip"
"os"
"testing"

"gopkg.in/check.v1"
"inet.af/netaddr"
)

func Test(t *testing.T) {
@@ -35,18 +34,18 @@ func (s *Suite) ResetDB(c *check.C) {
os.RemoveAll(tmpDir)
}
var err error
tmpDir, err = ioutil.TempDir("", "autoygg-client-test")
tmpDir, err = os.MkdirTemp("", "autoygg-client-test")
if err != nil {
c.Fatal(err)
}
cfg := Config{
IPPrefixes: []netaddr.IPPrefix{
netaddr.MustParseIPPrefix("10.27.0.0/23"),
IPPrefixes: []netip.Prefix{
netip.MustParsePrefix("10.27.0.0/23"),
},
}

app = Headscale{
cfg: cfg,
cfg: &cfg,
dbType: "sqlite3",
dbString: tmpDir + "/headscale_test.db",
}

@@ -7,6 +7,7 @@ import (

"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/prometheus/common/model"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -15,7 +16,7 @@ import (

const (
// 90 days.
DefaultAPIKeyExpiry = 90 * 24 * time.Hour
DefaultAPIKeyExpiry = "90d"
)

func init() {
@@ -23,7 +24,7 @@ func init() {
apiKeysCmd.AddCommand(listAPIKeys)

createAPIKeyCmd.Flags().
DurationP("expiration", "e", DefaultAPIKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
StringP("expiration", "e", DefaultAPIKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")

apiKeysCmd.AddCommand(createAPIKeyCmd)

@@ -118,10 +119,24 @@ If you loose a key, create a new one and revoke (expire) the old one.`,

request := &v1.CreateApiKeyRequest{}

duration, _ := cmd.Flags().GetDuration("expiration")
expiration := time.Now().UTC().Add(duration)
durationStr, _ := cmd.Flags().GetString("expiration")

log.Trace().Dur("expiration", duration).Msg("expiration has been set")
duration, err := model.ParseDuration(durationStr)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Could not parse duration: %s\n", err),
output,
)

return
}

expiration := time.Now().UTC().Add(time.Duration(duration))

log.Trace().
Dur("expiration", time.Duration(duration)).
Msg("expiration has been set")

request.Expiration = timestamppb.New(expiration)

@@ -3,6 +3,7 @@ package cli
import (
"fmt"

"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -10,8 +11,7 @@ import (
)

const (
keyLength = 64
errPreAuthKeyTooShort = Error("key too short, must be 64 hexadecimal characters")
errPreAuthKeyMalformed = Error("key is malformed. expected 64 hex characters with `nodekey` prefix")
)

// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
@@ -87,8 +87,8 @@ var createNodeCmd = &cobra.Command{

return
}
if len(machineKey) != keyLength {
err = errPreAuthKeyTooShort
if !headscale.NodePublicKeyRegex.Match([]byte(machineKey)) {
err = errPreAuthKeyMalformed
ErrorOutput(
err,
fmt.Sprintf("Error: %s", err),

cmd/headscale/cli/dump_config.go (new file)
@@ -0,0 +1,28 @@
package cli

import (
"fmt"

"github.com/spf13/cobra"
"github.com/spf13/viper"
)

func init() {
rootCmd.AddCommand(dumpConfigCmd)
}

var dumpConfigCmd = &cobra.Command{
Use: "dumpConfig",
Short: "dump current config to /etc/headscale/config.dump.yaml, integration test only",
Hidden: true,
Args: func(cmd *cobra.Command, args []string) error {
return nil
},
Run: func(cmd *cobra.Command, args []string) {
err := viper.WriteConfigAs("/etc/headscale/config.dump.yaml")
if err != nil {
//nolint
fmt.Println("Failed to dump config")
}
},
}
cmd/headscale/cli/mockoidc.go (new file)
@@ -0,0 +1,104 @@
package cli

import (
"fmt"
"net"
"os"
"strconv"
"time"

"github.com/oauth2-proxy/mockoidc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)

const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
accessTTL = 10 * time.Minute
refreshTTL = 60 * time.Minute
)

func init() {
rootCmd.AddCommand(mockOidcCmd)
}

var mockOidcCmd = &cobra.Command{
Use: "mockoidc",
Short: "Runs a mock OIDC server for testing",
Long: "This internal command runs a OpenID Connect for testing purposes",
Run: func(cmd *cobra.Command, args []string) {
err := mockOIDC()
if err != nil {
log.Error().Err(err).Msgf("Error running mock OIDC server")
os.Exit(1)
}
},
}

func mockOIDC() error {
clientID := os.Getenv("MOCKOIDC_CLIENT_ID")
if clientID == "" {
return errMockOidcClientIDNotDefined
}
clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
if clientSecret == "" {
return errMockOidcClientSecretNotDefined
}
addrStr := os.Getenv("MOCKOIDC_ADDR")
if addrStr == "" {
return errMockOidcPortNotDefined
}
portStr := os.Getenv("MOCKOIDC_PORT")
if portStr == "" {
return errMockOidcPortNotDefined
}

port, err := strconv.Atoi(portStr)
if err != nil {
return err
}

mock, err := getMockOIDC(clientID, clientSecret)
if err != nil {
return err
}

listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
if err != nil {
return err
}

err = mock.Start(listener, nil)
if err != nil {
return err
}
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
log.Info().Msgf("Issuer: %s", mock.Issuer())
c := make(chan struct{})
<-c

return nil
}

func getMockOIDC(clientID string, clientSecret string) (*mockoidc.MockOIDC, error) {
keypair, err := mockoidc.NewKeypair(nil)
if err != nil {
return nil, err
}

mock := mockoidc.MockOIDC{
ClientID: clientID,
ClientSecret: clientSecret,
AccessTTL: accessTTL,
RefreshTTL: refreshTTL,
CodeChallengeMethodsSupported: []string{"plain", "S256"},
Keypair: keypair,
SessionStore: mockoidc.NewSessionStore(),
UserQueue: &mockoidc.UserQueue{},
ErrorQueue: &mockoidc.ErrorQueue{},
}

return &mock, nil
}
@@ -3,6 +3,7 @@ package cli
import (
"fmt"
"log"
"net/netip"
"strconv"
"strings"
"time"
@@ -19,6 +20,7 @@ import (
func init() {
rootCmd.AddCommand(nodeCmd)
listNodesCmd.Flags().StringP("namespace", "n", "", "Filter by namespace")
listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
nodeCmd.AddCommand(listNodesCmd)

registerNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
@@ -40,12 +42,44 @@ func init() {
}
nodeCmd.AddCommand(expireNodeCmd)

renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
err = renameNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
}
nodeCmd.AddCommand(renameNodeCmd)

deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
err = deleteNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
}
nodeCmd.AddCommand(deleteNodeCmd)

moveNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")

err = moveNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
}

moveNodeCmd.Flags().StringP("namespace", "n", "", "New namespace")

err = moveNodeCmd.MarkFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
nodeCmd.AddCommand(moveNodeCmd)

tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")

err = tagCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
}
tagCmd.Flags().
StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
nodeCmd.AddCommand(tagCmd)
}

var nodeCmd = &cobra.Command{
@@ -74,7 +108,7 @@ var registerNodeCmd = &cobra.Command{
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine key from flag: %s", err),
fmt.Sprintf("Error getting node key from flag: %s", err),
output,
)

@@ -100,7 +134,7 @@ var registerNodeCmd = &cobra.Command{
return
}

SuccessOutput(response.Machine, "Machine register", output)
SuccessOutput(response.Machine, fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
},
}

@@ -116,6 +150,12 @@ var listNodesCmd = &cobra.Command{

return
}
showTags, err := cmd.Flags().GetBool("tags")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output)

return
}

ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
@@ -142,7 +182,7 @@ var listNodesCmd = &cobra.Command{
return
}

tableData, err := nodesToPtables(namespace, response.Machines)
tableData, err := nodesToPtables(namespace, showTags, response.Machines)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)

@@ -207,6 +247,54 @@ var expireNodeCmd = &cobra.Command{
},
}

var renameNodeCmd = &cobra.Command{
Use: "rename NEW_NAME",
Short: "Renames a machine in your network",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")

identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)

return
}

ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()

newName := ""
if len(args) > 0 {
newName = args[0]
}
request := &v1.RenameMachineRequest{
MachineId: identifier,
NewName: newName,
}

response, err := client.RenameMachine(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot rename machine: %s\n",
status.Convert(err).Message(),
),
output,
)

return
}

SuccessOutput(response.Machine, "Machine renamed", output)
},
}

var deleteNodeCmd = &cobra.Command{
Use: "delete",
Short: "Delete a node",
@@ -296,23 +384,105 @@ var deleteNodeCmd = &cobra.Command{
},
}

var moveNodeCmd = &cobra.Command{
Use: "move",
Short: "Move node to another namespace",
Aliases: []string{"mv"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")

identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)

return
}

namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting namespace: %s", err),
output,
)

return
}

ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()

getRequest := &v1.GetMachineRequest{
MachineId: identifier,
}

_, err = client.GetMachine(ctx, getRequest)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Error getting node: %s",
status.Convert(err).Message(),
),
output,
)

return
}

moveRequest := &v1.MoveMachineRequest{
MachineId: identifier,
Namespace: namespace,
}

moveResponse, err := client.MoveMachine(ctx, moveRequest)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Error moving node: %s",
status.Convert(err).Message(),
),
output,
)

return
}

SuccessOutput(moveResponse.Machine, "Node moved to another namespace", output)
},
}

func nodesToPtables(
currentNamespace string,
showTags bool,
machines []*v1.Machine,
) (pterm.TableData, error) {
tableData := pterm.TableData{
{
"ID",
"Name",
"NodeKey",
"Namespace",
"IP addresses",
"Ephemeral",
"Last seen",
"Online",
"Expired",
},
tableHeader := []string{
"ID",
"Hostname",
"Name",
"NodeKey",
"Namespace",
"IP addresses",
"Ephemeral",
"Last seen",
"Online",
"Expired",
}
if showTags {
tableHeader = append(tableHeader, []string{
"ForcedTags",
"InvalidTags",
"ValidTags",
}...)
}
tableData := pterm.TableData{tableHeader}

for _, machine := range machines {
var ephemeral bool
@@ -356,6 +526,26 @@ func nodesToPtables(
expired = pterm.LightRed("yes")
}

var forcedTags string
for _, tag := range machine.ForcedTags {
forcedTags += "," + tag
}
forcedTags = strings.TrimLeft(forcedTags, ",")
var invalidTags string
for _, tag := range machine.InvalidTags {
if !contains(machine.ForcedTags, tag) {
invalidTags += "," + pterm.LightRed(tag)
}
}
invalidTags = strings.TrimLeft(invalidTags, ",")
var validTags string
for _, tag := range machine.ValidTags {
if !contains(machine.ForcedTags, tag) {
validTags += "," + pterm.LightGreen(tag)
}
}
validTags = strings.TrimLeft(validTags, ",")

var namespace string
if currentNamespace == "" || (currentNamespace == machine.Namespace.Name) {
namespace = pterm.LightMagenta(machine.Namespace.Name)
@@ -363,21 +553,95 @@ func nodesToPtables(
// Shared into this namespace
namespace = pterm.LightYellow(machine.Namespace.Name)
}

var IPV4Address string
var IPV6Address string
for _, addr := range machine.IpAddresses {
if netip.MustParseAddr(addr).Is4() {
IPV4Address = addr
} else {
IPV6Address = addr
}
}

nodeData := []string{
strconv.FormatUint(machine.Id, headscale.Base10),
machine.Name,
machine.GetGivenName(),
nodeKey.ShortString(),
namespace,
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
strconv.FormatBool(ephemeral),
lastSeenTime,
online,
expired,
}
if showTags {
nodeData = append(nodeData, []string{forcedTags, invalidTags, validTags}...)
}
tableData = append(
tableData,
[]string{
strconv.FormatUint(machine.Id, headscale.Base10),
machine.Name,
nodeKey.ShortString(),
namespace,
strings.Join(machine.IpAddresses, ", "),
strconv.FormatBool(ephemeral),
lastSeenTime,
online,
expired,
},
nodeData,
)
}

return tableData, nil
}

var tagCmd = &cobra.Command{
Use: "tag",
Short: "Manage the tags of a node",
Aliases: []string{"tags", "t"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()

// retrieve flags from CLI
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)

return
}
tagsToSet, err := cmd.Flags().GetStringSlice("tags")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error retrieving list of tags to add to machine, %v", err),
output,
)

return
}

// Sending tags to machine
request := &v1.SetTagsRequest{
MachineId: identifier,
Tags: tagsToSet,
}
resp, err := client.SetTags(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error while sending tags to headscale: %s", err),
output,
)

return
}

if resp != nil {
SuccessOutput(
resp.GetMachine(),
"Machine updated",
output,
)
}
},
}

@@ -3,9 +3,11 @@ package cli
import (
"fmt"
"strconv"
"strings"
"time"

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/prometheus/common/model"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -13,7 +15,7 @@ import (
)

const (
DefaultPreAuthKeyExpiry = 1 * time.Hour
DefaultPreAuthKeyExpiry = "1h"
)

func init() {
@@ -31,7 +33,9 @@ func init() {
createPreAuthKeyCmd.PersistentFlags().
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
createPreAuthKeyCmd.Flags().
DurationP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
createPreAuthKeyCmd.Flags().
StringSlice("tags", []string{}, "Tags to automatically assign to node")
}

var preauthkeysCmd = &cobra.Command{
@@ -80,7 +84,16 @@ var listPreAuthKeys = &cobra.Command{
}

tableData := pterm.TableData{
{"ID", "Key", "Reusable", "Ephemeral", "Used", "Expiration", "Created"},
{
"ID",
"Key",
"Reusable",
"Ephemeral",
"Used",
"Expiration",
"Created",
"Tags",
},
}
for _, key := range response.PreAuthKeys {
expiration := "-"
@@ -95,6 +108,14 @@ var listPreAuthKeys = &cobra.Command{
reusable = fmt.Sprintf("%v", key.GetReusable())
}

aclTags := ""

for _, tag := range key.AclTags {
aclTags += "," + tag
}

aclTags = strings.TrimLeft(aclTags, ",")

tableData = append(tableData, []string{
key.GetId(),
key.GetKey(),
@@ -103,6 +124,7 @@ var listPreAuthKeys = &cobra.Command{
strconv.FormatBool(key.GetUsed()),
expiration,
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
aclTags,
})

}
@@ -135,6 +157,7 @@ var createPreAuthKeyCmd = &cobra.Command{

reusable, _ := cmd.Flags().GetBool("reusable")
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
tags, _ := cmd.Flags().GetStringSlice("tags")

log.Trace().
Bool("reusable", reusable).
@@ -146,12 +169,27 @@ var createPreAuthKeyCmd = &cobra.Command{
Namespace: namespace,
Reusable: reusable,
Ephemeral: ephemeral,
AclTags: tags,
}

duration, _ := cmd.Flags().GetDuration("expiration")
expiration := time.Now().UTC().Add(duration)
durationStr, _ := cmd.Flags().GetString("expiration")

log.Trace().Dur("expiration", duration).Msg("expiration has been set")
duration, err := model.ParseDuration(durationStr)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Could not parse duration: %s\n", err),
output,
)

return
}

expiration := time.Now().UTC().Add(time.Duration(duration))

log.Trace().
Dur("expiration", time.Duration(duration)).
Msg("expiration has been set")

request.Expiration = timestamppb.New(expiration)

@@ -3,17 +3,86 @@ package cli
import (
"fmt"
"os"
"runtime"

"github.com/juanfont/headscale"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/tcnksm/go-latest"
)

var cfgFile string = ""

func init() {
if len(os.Args) > 1 && (os.Args[1] == "version" || os.Args[1] == "mockoidc") {
return
}

cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
rootCmd.PersistentFlags().
StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
rootCmd.PersistentFlags().
Bool("force", false, "Disable prompts and forces the execution")
}

func initConfig() {
if cfgFile == "" {
cfgFile = os.Getenv("HEADSCALE_CONFIG")
}
if cfgFile != "" {
err := headscale.LoadConfig(cfgFile, true)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
}
} else {
err := headscale.LoadConfig("", false)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config")
}
}

cfg, err := headscale.GetHeadscaleConfig()
if err != nil {
log.Fatal().Caller().Err(err)
}

machineOutput := HasMachineOutputFlag()

zerolog.SetGlobalLevel(cfg.Log.Level)

// If the user has requested a "machine" readable format,
// then disable login so the output remains valid.
if machineOutput {
zerolog.SetGlobalLevel(zerolog.Disabled)
}

if cfg.Log.Format == headscale.JSONLogFormat {
log.Logger = log.Output(os.Stdout)
}

if !cfg.DisableUpdateCheck && !machineOutput {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
Version != "dev" {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
}
res, err := latest.Check(githubTag, Version)
if err == nil && res.Outdated {
//nolint
fmt.Printf(
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
res.Current,
Version,
)
}
}
}
}

var rootCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",

@@ -24,6 +24,8 @@ func init() {
enableRouteCmd.Flags().
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to enable")
enableRouteCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
enableRouteCmd.Flags().BoolP("all", "a", false, "All routes from host")

err = enableRouteCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
@@ -125,21 +127,43 @@ omit the route you do not want to enable.
return
}

routes, err := cmd.Flags().GetStringSlice("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting routes from flag: %s", err),
output,
)

return
}

ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()

var routes []string

isAll, _ := cmd.Flags().GetBool("all")
if isAll {
response, err := client.GetMachineRoute(ctx, &v1.GetMachineRouteRequest{
MachineId: machineID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot get machine routes: %s\n",
status.Convert(err).Message(),
),
output,
)

return
}
routes = response.GetRoutes().GetAdvertisedRoutes()
} else {
routes, err = cmd.Flags().GetStringSlice("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting routes from flag: %s", err),
output,
)

return
}
}

request := &v1.EnableMachineRoutesRequest{
MachineId: machineID,
Routes: routes,

@@ -16,12 +16,12 @@ var serveCmd = &cobra.Command{
return nil
},
Run: func(cmd *cobra.Command, args []string) {
h, err := getHeadscaleApp()
app, err := getHeadscaleApp()
if err != nil {
log.Fatal().Caller().Err(err).Msg("Error initializing")
}

err = h.Serve()
err = app.Serve()
if err != nil {
log.Fatal().Caller().Err(err).Msg("Error starting server")
}

@@ -4,399 +4,33 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/fs"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"reflect"

"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"gopkg.in/yaml.v2"
"inet.af/netaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
)

const (
PermissionFallback = 0o700
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
SocketWritePermissions = 0o666
)

func LoadConfig(path string) error {
viper.SetConfigName("config")
if path == "" {
viper.AddConfigPath("/etc/headscale/")
viper.AddConfigPath("$HOME/.headscale")
viper.AddConfigPath(".")
} else {
// For testing
viper.AddConfigPath(path)
}

viper.SetEnvPrefix("headscale")
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()

viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
viper.SetDefault("tls_letsencrypt_challenge_type", "HTTP-01")
viper.SetDefault("tls_client_auth_mode", "relaxed")

viper.SetDefault("log_level", "info")

viper.SetDefault("dns_config", nil)

viper.SetDefault("derp.server.enabled", false)
viper.SetDefault("derp.server.stun.enabled", true)

viper.SetDefault("unix_socket", "/var/run/headscale.sock")
viper.SetDefault("unix_socket_permission", "0o770")

viper.SetDefault("grpc_listen_addr", ":50443")
viper.SetDefault("grpc_allow_insecure", false)

viper.SetDefault("cli.timeout", "5s")
viper.SetDefault("cli.insecure", false)

viper.SetDefault("oidc.strip_email_domain", true)

if err := viper.ReadInConfig(); err != nil {
return fmt.Errorf("fatal error reading config file: %w", err)
}

// Collect any validation errors and return them all at once
var errorText string
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
}

if (viper.GetString("tls_letsencrypt_hostname") != "") &&
(viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") &&
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
log.Warn().
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
}

if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") &&
(viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
}

if !strings.HasPrefix(viper.GetString("server_url"), "http://") &&
!strings.HasPrefix(viper.GetString("server_url"), "https://") {
errorText += "Fatal config error: server_url must start with https:// or http://\n"
}

_, authModeValid := headscale.LookupTLSClientAuthMode(
viper.GetString("tls_client_auth_mode"),
)

if !authModeValid {
errorText += fmt.Sprintf(
"Invalid tls_client_auth_mode supplied: %s. Accepted values: %s, %s, %s.",
viper.GetString("tls_client_auth_mode"),
headscale.DisabledClientAuth,
headscale.RelaxedClientAuth,
headscale.EnforcedClientAuth)
}

if errorText != "" {
//nolint
return errors.New(strings.TrimSuffix(errorText, "\n"))
} else {
return nil
}
}

func GetDERPConfig() headscale.DERPConfig {
serverEnabled := viper.GetBool("derp.server.enabled")
serverRegionID := viper.GetInt("derp.server.region_id")
serverRegionCode := viper.GetString("derp.server.region_code")
serverRegionName := viper.GetString("derp.server.region_name")
stunAddr := viper.GetString("derp.server.stun_listen_addr")

if serverEnabled && stunAddr == "" {
log.Fatal().Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true")
}

urlStrs := viper.GetStringSlice("derp.urls")

urls := make([]url.URL, len(urlStrs))
for index, urlStr := range urlStrs {
urlAddr, err := url.Parse(urlStr)
if err != nil {
log.Error().
Str("url", urlStr).
Err(err).
Msg("Failed to parse url, ignoring...")
}

urls[index] = *urlAddr
}

paths := viper.GetStringSlice("derp.paths")

autoUpdate := viper.GetBool("derp.auto_update_enabled")
updateFrequency := viper.GetDuration("derp.update_frequency")

return headscale.DERPConfig{
ServerEnabled: serverEnabled,
ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName,
STUNAddr: stunAddr,
URLs: urls,
Paths: paths,
AutoUpdate: autoUpdate,
UpdateFrequency: updateFrequency,
}
}

func GetDNSConfig() (*tailcfg.DNSConfig, string) {
if viper.IsSet("dns_config") {
dnsConfig := &tailcfg.DNSConfig{}

if viper.IsSet("dns_config.nameservers") {
nameserversStr := viper.GetStringSlice("dns_config.nameservers")

nameservers := make([]netaddr.IP, len(nameserversStr))
resolvers := make([]dnstype.Resolver, len(nameserversStr))

for index, nameserverStr := range nameserversStr {
nameserver, err := netaddr.ParseIP(nameserverStr)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse nameserver IP: %s", nameserverStr)
}

nameservers[index] = nameserver
resolvers[index] = dnstype.Resolver{
Addr: nameserver.String(),
}
}

dnsConfig.Nameservers = nameservers
dnsConfig.Resolvers = resolvers
}

if viper.IsSet("dns_config.restricted_nameservers") {
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Routes = make(map[string][]dnstype.Resolver)
restrictedDNS := viper.GetStringMapStringSlice(
"dns_config.restricted_nameservers",
)
for domain, restrictedNameservers := range restrictedDNS {
restrictedResolvers := make(
[]dnstype.Resolver,
len(restrictedNameservers),
)
for index, nameserverStr := range restrictedNameservers {
nameserver, err := netaddr.ParseIP(nameserverStr)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
}
restrictedResolvers[index] = dnstype.Resolver{
Addr: nameserver.String(),
}
}
dnsConfig.Routes[domain] = restrictedResolvers
}
} else {
log.Warn().
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
}
}

if viper.IsSet("dns_config.domains") {
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
}

if viper.IsSet("dns_config.magic_dns") {
magicDNS := viper.GetBool("dns_config.magic_dns")
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Proxied = magicDNS
} else if magicDNS {
log.Warn().
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
}
}

var baseDomain string
if viper.IsSet("dns_config.base_domain") {
baseDomain = viper.GetString("dns_config.base_domain")
} else {
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
}

return dnsConfig, baseDomain
}

return nil, ""
}

func absPath(path string) string {
// If a relative path is provided, prefix it with the the directory where
// the config file was found.
if (path != "") && !strings.HasPrefix(path, string(os.PathSeparator)) {
dir, _ := filepath.Split(viper.ConfigFileUsed())
if dir != "" {
path = filepath.Join(dir, path)
}
}

return path
}

func getHeadscaleConfig() headscale.Config {
dnsConfig, baseDomain := GetDNSConfig()
derpConfig := GetDERPConfig()

configuredPrefixes := viper.GetStringSlice("ip_prefixes")
parsedPrefixes := make([]netaddr.IPPrefix, 0, len(configuredPrefixes)+1)

legacyPrefixField := viper.GetString("ip_prefix")
if len(legacyPrefixField) > 0 {
log.
Warn().
Msgf(
"%s, %s",
"use of 'ip_prefix' for configuration is deprecated",
"please see 'ip_prefixes' in the shipped example.",
)
legacyPrefix, err := netaddr.ParseIPPrefix(legacyPrefixField)
if err != nil {
panic(fmt.Errorf("failed to parse ip_prefix: %w", err))
}
parsedPrefixes = append(parsedPrefixes, legacyPrefix)
}

for i, prefixInConfig := range configuredPrefixes {
prefix, err := netaddr.ParseIPPrefix(prefixInConfig)
if err != nil {
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
}
parsedPrefixes = append(parsedPrefixes, prefix)
}

prefixes := make([]netaddr.IPPrefix, 0, len(parsedPrefixes))
{
// dedup
normalizedPrefixes := make(map[string]int, len(parsedPrefixes))
for i, p := range parsedPrefixes {
normalized, _ := p.Range().Prefix()
normalizedPrefixes[normalized.String()] = i
}

// convert back to list
for _, i := range normalizedPrefixes {
prefixes = append(prefixes, parsedPrefixes[i])
}
}

if len(prefixes) < 1 {
prefixes = append(prefixes, netaddr.MustParseIPPrefix("100.64.0.0/10"))
log.Warn().
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
}

tlsClientAuthMode, _ := headscale.LookupTLSClientAuthMode(
viper.GetString("tls_client_auth_mode"),
)

return headscale.Config{
ServerURL: viper.GetString("server_url"),
Addr: viper.GetString("listen_addr"),
MetricsAddr: viper.GetString("metrics_listen_addr"),
GRPCAddr: viper.GetString("grpc_listen_addr"),
GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"),

IPPrefixes: prefixes,
PrivateKeyPath: absPath(viper.GetString("private_key_path")),
BaseDomain: baseDomain,

DERP: derpConfig,

EphemeralNodeInactivityTimeout: viper.GetDuration(
"ephemeral_node_inactivity_timeout",
),

DBtype: viper.GetString("db_type"),
DBpath: absPath(viper.GetString("db_path")),
DBhost: viper.GetString("db_host"),
DBport: viper.GetInt("db_port"),
DBname: viper.GetString("db_name"),
DBuser: viper.GetString("db_user"),
DBpass: viper.GetString("db_pass"),

TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
TLSLetsEncryptCacheDir: absPath(
viper.GetString("tls_letsencrypt_cache_dir"),
),
TLSLetsEncryptChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),

TLSCertPath: absPath(viper.GetString("tls_cert_path")),
TLSKeyPath: absPath(viper.GetString("tls_key_path")),
TLSClientAuthMode: tlsClientAuthMode,

DNSConfig: dnsConfig,

ACMEEmail: viper.GetString("acme_email"),
ACMEURL: viper.GetString("acme_url"),

UnixSocket: viper.GetString("unix_socket"),
UnixSocketPermission: GetFileMode("unix_socket_permission"),

OIDC: headscale.OIDCConfig{
Issuer: viper.GetString("oidc.issuer"),
ClientID: viper.GetString("oidc.client_id"),
ClientSecret: viper.GetString("oidc.client_secret"),
StripEmaildomain: viper.GetBool("oidc.strip_email_domain"),
},

CLI: headscale.CLIConfig{
Address: viper.GetString("cli.address"),
APIKey: viper.GetString("cli.api_key"),
Timeout: viper.GetDuration("cli.timeout"),
Insecure: viper.GetBool("cli.insecure"),
},
}
}

func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
// TODO: Find a better way to return this text
|
||||
//nolint
|
||||
err := fmt.Errorf(
|
||||
"ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s",
|
||||
viper.GetString("ephemeral_node_inactivity_timeout"),
|
||||
minInactivityTimeout,
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to load configuration while creating headscale instance: %w",
|
||||
err,
|
||||
)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := getHeadscaleConfig()
|
||||
|
||||
app, err := headscale.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -404,8 +38,8 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
|
||||
// We are doing this here, as in the future could be cool to have it also hot-reload
|
||||
|
||||
if viper.GetString("acl_policy_path") != "" {
|
||||
aclPath := absPath(viper.GetString("acl_policy_path"))
|
||||
if cfg.ACL.PolicyPath != "" {
|
||||
aclPath := headscale.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath)
|
||||
err = app.LoadACLPolicy(aclPath)
|
||||
if err != nil {
|
||||
log.Fatal().
|
||||
@@ -419,7 +53,14 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
}
|
||||
|
||||
func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
|
||||
cfg := getHeadscaleConfig()
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
Caller().
|
||||
Msgf("Failed to load configuration")
|
||||
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Dur("timeout", cfg.CLI.Timeout).
|
||||
@@ -441,6 +82,19 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
|
||||
address = cfg.UnixSocket
|
||||
|
||||
// Try to give the user better feedback if we cannot write to the headscale
|
||||
// socket.
|
||||
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
Str("socket", cfg.UnixSocket).
|
||||
Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
|
||||
}
|
||||
}
|
||||
socket.Close()
|
||||
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
@@ -480,6 +134,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
|
||||
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
|
||||
}
|
||||
|
||||
client := v1.NewHeadscaleServiceClient(conn)
|
||||
@@ -553,13 +208,12 @@ func (tokenAuth) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func GetFileMode(key string) fs.FileMode {
|
||||
modeStr := viper.GetString(key)
|
||||
|
||||
mode, err := strconv.ParseUint(modeStr, headscale.Base8, headscale.BitSize64)
|
||||
if err != nil {
|
||||
return PermissionFallback
|
||||
func contains[T string](ts []T, t T) bool {
|
||||
for _, v := range ts {
|
||||
if reflect.DeepEqual(v, t) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return fs.FileMode(mode)
|
||||
return false
|
||||
}
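For reference, a minimal sketch of how the generic `contains` helper above can be exercised; the call site and the format names are hypothetical, not part of this change. Because the type parameter is constrained to `string`, it only accepts string-valued slices.

// Hypothetical call site (not headscale code): check whether the requested
// output format is one of the machine-readable formats the CLI understands.
machineFormats := []string{"json", "json-line", "yaml"}
if contains(machineFormats, "json") {
	// keep log output disabled so the machine-readable output stays valid
}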
|
||||
|
||||
@@ -1,17 +1,13 @@
package main

import (
"fmt"
"os"
"runtime"
"time"

"github.com/efekarakus/termcolor"
"github.com/juanfont/headscale/cmd/headscale/cli"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
"github.com/tcnksm/go-latest"
)

func main() {
@@ -43,44 +39,5 @@ func main() {
NoColor: !colors,
})

if err := cli.LoadConfig(""); err != nil {
log.Fatal().Caller().Err(err)
}

machineOutput := cli.HasMachineOutputFlag()

logLevel := viper.GetString("log_level")
level, err := zerolog.ParseLevel(logLevel)
if err != nil {
zerolog.SetGlobalLevel(zerolog.DebugLevel)
} else {
zerolog.SetGlobalLevel(level)
}

// If the user has requested a "machine" readable format,
// then disable logging so the output remains valid.
if machineOutput {
zerolog.SetGlobalLevel(zerolog.Disabled)
}

if !viper.GetBool("disable_check_updates") && !machineOutput {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
cli.Version != "dev" {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
}
res, err := latest.Check(githubTag, cli.Version)
if err == nil && res.Outdated {
//nolint
fmt.Printf(
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
res.Current,
cli.Version,
)
}
}
}

cli.Execute()
}
|
||||
|
||||
@@ -2,13 +2,12 @@ package main

import (
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"

"github.com/juanfont/headscale/cmd/headscale/cli"
"github.com/juanfont/headscale"
"github.com/spf13/viper"
"gopkg.in/check.v1"
)
@@ -27,8 +26,8 @@ func (s *Suite) SetUpSuite(c *check.C) {
func (s *Suite) TearDownSuite(c *check.C) {
}

func (*Suite) TestConfigLoading(c *check.C) {
tmpDir, err := ioutil.TempDir("", "headscale")
func (*Suite) TestConfigFileLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
@@ -39,17 +38,19 @@ func (*Suite) TestConfigLoading(c *check.C) {
c.Fatal(err)
}

cfgFile := filepath.Join(tmpDir, "config.yaml")

// Symlink the example config file
err = os.Symlink(
filepath.Clean(path+"/../../config-example.yaml"),
filepath.Join(tmpDir, "config.yaml"),
cfgFile,
)
if err != nil {
c.Fatal(err)
}

// Load example config, it should load without validation errors
err = cli.LoadConfig(tmpDir)
err = headscale.LoadConfig(cfgFile, true)
c.Assert(err, check.IsNil)

// Test that config file was interpreted correctly
@@ -63,14 +64,15 @@ func (*Suite) TestConfigLoading(c *check.C) {
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
c.Assert(
cli.GetFileMode("unix_socket_permission"),
headscale.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
}

func (*Suite) TestDNSConfigLoading(c *check.C) {
tmpDir, err := ioutil.TempDir("", "headscale")
func (*Suite) TestConfigLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
@@ -91,10 +93,54 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
}

// Load example config, it should load without validation errors
err = cli.LoadConfig(tmpDir)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)

dnsConfig, baseDomain := cli.GetDNSConfig()
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
c.Assert(
headscale.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
c.Assert(viper.GetBool("randomize_client_port"), check.Equals, false)
}

func (*Suite) TestDNSConfigLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)

path, err := os.Getwd()
if err != nil {
c.Fatal(err)
}

// Symlink the example config file
err = os.Symlink(
filepath.Clean(path+"/../../config-example.yaml"),
filepath.Join(tmpDir, "config.yaml"),
)
if err != nil {
c.Fatal(err)
}

// Load example config, it should load without validation errors
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)

dnsConfig, baseDomain := headscale.GetDNSConfig()

c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
@@ -105,26 +151,28 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
func writeConfig(c *check.C, tmpDir string, configYaml []byte) {
// Populate a custom config file
configFile := filepath.Join(tmpDir, "config.yaml")
err := ioutil.WriteFile(configFile, configYaml, 0o600)
err := os.WriteFile(configFile, configYaml, 0o600)
if err != nil {
c.Fatalf("Couldn't write file %s", configFile)
}
}

func (*Suite) TestTLSConfigValidation(c *check.C) {
tmpDir, err := ioutil.TempDir("", "headscale")
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
// defer os.RemoveAll(tmpDir)

configYaml := []byte(
"---\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: \"abc.pem\"",
)
configYaml := []byte(`---
tls_letsencrypt_hostname: example.com
tls_letsencrypt_challenge_type: ""
tls_cert_path: abc.pem
noise:
private_key_path: noise_private.key`)
writeConfig(c, tmpDir, configYaml)

// Check configuration validation errors (1)
err = cli.LoadConfig(tmpDir)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.NotNil)
// check.Matches can not handle multiline strings
tmp := strings.ReplaceAll(err.Error(), "\n", "***")
@@ -145,10 +193,14 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
)

// Check configuration validation errors (2)
configYaml = []byte(
"---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"",
)
configYaml = []byte(`---
noise:
private_key_path: noise_private.key
server_url: http://127.0.0.1:8080
tls_letsencrypt_hostname: example.com
tls_letsencrypt_challenge_type: TLS-ALPN-01
`)
writeConfig(c, tmpDir, configYaml)
err = cli.LoadConfig(tmpDir)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)
}
||||
|
||||
@@ -35,12 +35,20 @@ grpc_listen_addr: 0.0.0.0:50443
|
||||
# are doing.
|
||||
grpc_allow_insecure: false
|
||||
|
||||
# Private key used encrypt the traffic between headscale
|
||||
# Private key used to encrypt the traffic between headscale
|
||||
# and Tailscale clients.
|
||||
# The private key file which will be
|
||||
# autogenerated if it's missing
|
||||
# The private key file will be autogenerated if it's missing.
|
||||
private_key_path: /var/lib/headscale/private.key
|
||||
|
||||
# The Noise section includes specific configuration for the
|
||||
# TS2021 Noise protocol
|
||||
noise:
|
||||
# The Noise private key is used to encrypt the
|
||||
# traffic between headscale and Tailscale clients when
|
||||
# using the new Noise-based protocol. It must be different
|
||||
# from the legacy private key.
|
||||
private_key_path: /var/lib/headscale/noise_private.key
|
||||
|
||||
# List of IP prefixes to allocate tailaddresses from.
|
||||
# Each prefix consists of either an IPv4 or IPv6 address,
|
||||
# and the associated prefix length, delimited by a slash.
|
||||
@@ -69,7 +77,7 @@ derp:
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
|
||||
# Listens in UDP at the configured address for STUN connections to help on NAT traversal.
|
||||
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
|
||||
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
|
||||
#
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
@@ -103,17 +111,25 @@ disable_check_updates: false
|
||||
# Time before an inactive ephemeral node is deleted?
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
|
||||
# Period to check for node updates within the tailnet. A value too low will severely affect
|
||||
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
|
||||
# for the nodes, as they won't get updates or keep alive messages frequently enough.
|
||||
# In case of doubts, do not touch the default 10s.
|
||||
node_update_check_interval: 10s
|
||||
|
||||
# SQLite config
|
||||
db_type: sqlite3
|
||||
db_path: /var/lib/headscale/db.sqlite
|
||||
|
||||
# # Postgres config
|
||||
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
|
||||
# db_type: postgres
|
||||
# db_host: localhost
|
||||
# db_port: 5432
|
||||
# db_name: headscale
|
||||
# db_user: foo
|
||||
# db_pass: bar
|
||||
# db_ssl: false
|
||||
|
||||
### TLS configuration
|
||||
#
|
||||
@@ -147,7 +163,7 @@ tls_letsencrypt_cache_dir: /var/lib/headscale/cache
|
||||
# See [docs/tls.md](docs/tls.md) for more information
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
# When HTTP-01 challenge is chosen, letsencrypt must set up a
|
||||
# verification endpoint, and it will be listning on:
|
||||
# verification endpoint, and it will be listening on:
|
||||
# :http = port 80
|
||||
tls_letsencrypt_listen: ":http"
|
||||
|
||||
@@ -155,7 +171,10 @@ tls_letsencrypt_listen: ":http"
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
log_level: info
|
||||
log:
|
||||
# Output formatting for logs: text or json
|
||||
format: text
|
||||
level: info
|
||||
|
||||
# Path to a file containg ACL policies.
|
||||
# ACLs can be defined as YAML or HUJSON.
|
||||
@@ -172,6 +191,9 @@ acl_policy_path: ""
|
||||
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
|
||||
#
|
||||
dns_config:
|
||||
# Whether to prefer using Headscale provided DNS or use local.
|
||||
override_local_dns: true
|
||||
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
@@ -210,13 +232,43 @@ unix_socket_permission: "0770"
|
||||
# help us test it.
|
||||
# OpenID Connect
|
||||
# oidc:
|
||||
# only_start_if_oidc_is_available: true
|
||||
# issuer: "https://your-oidc.issuer.com/path"
|
||||
# client_id: "your-oidc-client-id"
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
#
|
||||
# Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
|
||||
# parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
#
|
||||
# scope: ["openid", "profile", "email", "custom"]
|
||||
# extra_params:
|
||||
# domain_hint: example.com
|
||||
#
|
||||
# List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
|
||||
# authentication request will be rejected.
|
||||
#
|
||||
# allowed_domains:
|
||||
# - example.com
|
||||
# allowed_users:
|
||||
# - alice@example.com
|
||||
#
|
||||
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
|
||||
# This will transform `first-name.last-name@example.com` to the namespace `first-name.last-name`
|
||||
# If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
|
||||
# namespace: `first-name.last-name.example.com`
|
||||
#
|
||||
# strip_email_domain: true
|
||||
|
||||
# Logtail configuration
|
||||
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
|
||||
# to instruct tailscale nodes to log their activity to a remote server.
|
||||
logtail:
|
||||
# Enable logtail for this headscales clients.
|
||||
# As there is currently no support for overriding the log server in headscale, this is
|
||||
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
|
||||
enabled: false
|
||||
|
||||
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
|
||||
# default static port 41641. This option is intended as a workaround for some buggy
|
||||
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
|
||||
randomize_client_port: false
|
||||
|
||||
config.go (new file, 583 lines)
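Before the new file itself, a short sketch of how this configuration API is meant to be consumed by callers; the call order mirrors the CLI changes earlier in this diff, with error handling condensed for brevity.

package example

import (
	"log"

	"github.com/juanfont/headscale"
)

// loadConfig shows the expected sequence: LoadConfig populates viper from a
// config file (or search path), then GetHeadscaleConfig materialises the
// typed Config struct used to construct the server.
func loadConfig(path string, isFile bool) *headscale.Config {
	if err := headscale.LoadConfig(path, isFile); err != nil {
		log.Fatalf("reading configuration: %v", err)
	}

	cfg, err := headscale.GetHeadscaleConfig()
	if err != nil {
		log.Fatalf("parsing configuration: %v", err)
	}

	return cfg
}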
@@ -0,0 +1,583 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
const (
|
||||
tlsALPN01ChallengeType = "TLS-ALPN-01"
|
||||
http01ChallengeType = "HTTP-01"
|
||||
|
||||
JSONLogFormat = "json"
|
||||
TextLogFormat = "text"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration.
|
||||
type Config struct {
|
||||
ServerURL string
|
||||
Addr string
|
||||
MetricsAddr string
|
||||
GRPCAddr string
|
||||
GRPCAllowInsecure bool
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
NodeUpdateCheckInterval time.Duration
|
||||
IPPrefixes []netip.Prefix
|
||||
PrivateKeyPath string
|
||||
NoisePrivateKeyPath string
|
||||
BaseDomain string
|
||||
Log LogConfig
|
||||
DisableUpdateCheck bool
|
||||
|
||||
DERP DERPConfig
|
||||
|
||||
DBtype string
|
||||
DBpath string
|
||||
DBhost string
|
||||
DBport int
|
||||
DBname string
|
||||
DBuser string
|
||||
DBpass string
|
||||
DBssl bool
|
||||
|
||||
TLS TLSConfig
|
||||
|
||||
ACMEURL string
|
||||
ACMEEmail string
|
||||
|
||||
DNSConfig *tailcfg.DNSConfig
|
||||
|
||||
UnixSocket string
|
||||
UnixSocketPermission fs.FileMode
|
||||
|
||||
OIDC OIDCConfig
|
||||
|
||||
LogTail LogTailConfig
|
||||
RandomizeClientPort bool
|
||||
|
||||
CLI CLIConfig
|
||||
|
||||
ACL ACLConfig
|
||||
}
|
||||
|
||||
type TLSConfig struct {
|
||||
CertPath string
|
||||
KeyPath string
|
||||
ClientAuthMode tls.ClientAuthType
|
||||
|
||||
LetsEncrypt LetsEncryptConfig
|
||||
}
|
||||
|
||||
type LetsEncryptConfig struct {
|
||||
Listen string
|
||||
Hostname string
|
||||
CacheDir string
|
||||
ChallengeType string
|
||||
}
|
||||
|
||||
type OIDCConfig struct {
|
||||
OnlyStartIfOIDCIsAvailable bool
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
Scope []string
|
||||
ExtraParams map[string]string
|
||||
AllowedDomains []string
|
||||
AllowedUsers []string
|
||||
StripEmaildomain bool
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
ServerEnabled bool
|
||||
ServerRegionID int
|
||||
ServerRegionCode string
|
||||
ServerRegionName string
|
||||
STUNAddr string
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
}
|
||||
|
||||
type LogTailConfig struct {
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
Address string
|
||||
APIKey string
|
||||
Timeout time.Duration
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
type ACLConfig struct {
|
||||
PolicyPath string
|
||||
}
|
||||
|
||||
type LogConfig struct {
|
||||
Format string
|
||||
Level zerolog.Level
|
||||
}
|
||||
|
||||
func LoadConfig(path string, isFile bool) error {
|
||||
if isFile {
|
||||
viper.SetConfigFile(path)
|
||||
} else {
|
||||
viper.SetConfigName("config")
|
||||
if path == "" {
|
||||
viper.AddConfigPath("/etc/headscale/")
|
||||
viper.AddConfigPath("$HOME/.headscale")
|
||||
viper.AddConfigPath(".")
|
||||
} else {
|
||||
// For testing
|
||||
viper.AddConfigPath(path)
|
||||
}
|
||||
}
|
||||
|
||||
viper.SetEnvPrefix("headscale")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", http01ChallengeType)
|
||||
viper.SetDefault("tls_client_auth_mode", "relaxed")
|
||||
|
||||
viper.SetDefault("log.level", "info")
|
||||
viper.SetDefault("log.format", TextLogFormat)
|
||||
|
||||
viper.SetDefault("dns_config", nil)
|
||||
viper.SetDefault("dns_config.override_local_dns", true)
|
||||
|
||||
viper.SetDefault("derp.server.enabled", false)
|
||||
viper.SetDefault("derp.server.stun.enabled", true)
|
||||
|
||||
viper.SetDefault("unix_socket", "/var/run/headscale.sock")
|
||||
viper.SetDefault("unix_socket_permission", "0o770")
|
||||
|
||||
viper.SetDefault("grpc_listen_addr", ":50443")
|
||||
viper.SetDefault("grpc_allow_insecure", false)
|
||||
|
||||
viper.SetDefault("cli.timeout", "5s")
|
||||
viper.SetDefault("cli.insecure", false)
|
||||
|
||||
viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"})
|
||||
viper.SetDefault("oidc.strip_email_domain", true)
|
||||
viper.SetDefault("oidc.only_start_if_oidc_is_available", true)
|
||||
|
||||
viper.SetDefault("logtail.enabled", false)
|
||||
viper.SetDefault("randomize_client_port", false)
|
||||
|
||||
viper.SetDefault("ephemeral_node_inactivity_timeout", "120s")
|
||||
|
||||
viper.SetDefault("node_update_check_interval", "10s")
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to read configuration from disk")
|
||||
|
||||
return fmt.Errorf("fatal error reading config file: %w", err)
|
||||
}
|
||||
|
||||
// Collect any validation errors and return them all at once
|
||||
var errorText string
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
|
||||
}
|
||||
|
||||
if !viper.IsSet("noise") || viper.GetString("noise.private_key_path") == "" {
|
||||
errorText += "Fatal config error: headscale now requires a new `noise.private_key_path` field in the config file for the Tailscale v2 protocol\n"
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") == tlsALPN01ChallengeType) &&
|
||||
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
|
||||
log.Warn().
|
||||
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != http01ChallengeType) &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") != tlsALPN01ChallengeType) {
|
||||
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") &&
|
||||
!strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
errorText += "Fatal config error: server_url must start with https:// or http://\n"
|
||||
}
|
||||
|
||||
_, authModeValid := LookupTLSClientAuthMode(
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
)
|
||||
|
||||
if !authModeValid {
|
||||
errorText += fmt.Sprintf(
|
||||
"Invalid tls_client_auth_mode supplied: %s. Accepted values: %s, %s, %s.",
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
DisabledClientAuth,
|
||||
RelaxedClientAuth,
|
||||
EnforcedClientAuth)
|
||||
}
|
||||
|
||||
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
errorText += fmt.Sprintf(
|
||||
"Fatal config error: ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s",
|
||||
viper.GetString("ephemeral_node_inactivity_timeout"),
|
||||
minInactivityTimeout,
|
||||
)
|
||||
}
|
||||
|
||||
maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s")
|
||||
if viper.GetDuration("node_update_check_interval") > maxNodeUpdateCheckInterval {
|
||||
errorText += fmt.Sprintf(
|
||||
"Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s",
|
||||
viper.GetString("node_update_check_interval"),
|
||||
maxNodeUpdateCheckInterval,
|
||||
)
|
||||
}
|
||||
|
||||
if errorText != "" {
|
||||
//nolint
|
||||
return errors.New(strings.TrimSuffix(errorText, "\n"))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func GetTLSConfig() TLSConfig {
|
||||
tlsClientAuthMode, _ := LookupTLSClientAuthMode(
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
)
|
||||
|
||||
return TLSConfig{
|
||||
LetsEncrypt: LetsEncryptConfig{
|
||||
Hostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
Listen: viper.GetString("tls_letsencrypt_listen"),
|
||||
CacheDir: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_letsencrypt_cache_dir"),
|
||||
),
|
||||
ChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
|
||||
},
|
||||
CertPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_cert_path"),
|
||||
),
|
||||
KeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_key_path"),
|
||||
),
|
||||
ClientAuthMode: tlsClientAuthMode,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDERPConfig() DERPConfig {
|
||||
serverEnabled := viper.GetBool("derp.server.enabled")
|
||||
serverRegionID := viper.GetInt("derp.server.region_id")
|
||||
serverRegionCode := viper.GetString("derp.server.region_code")
|
||||
serverRegionName := viper.GetString("derp.server.region_name")
|
||||
stunAddr := viper.GetString("derp.server.stun_listen_addr")
|
||||
|
||||
if serverEnabled && stunAddr == "" {
|
||||
log.Fatal().
|
||||
Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true")
|
||||
}
|
||||
|
||||
urlStrs := viper.GetStringSlice("derp.urls")
|
||||
|
||||
urls := make([]url.URL, len(urlStrs))
|
||||
for index, urlStr := range urlStrs {
|
||||
urlAddr, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("url", urlStr).
|
||||
Err(err).
|
||||
Msg("Failed to parse url, ignoring...")
|
||||
}
|
||||
|
||||
urls[index] = *urlAddr
|
||||
}
|
||||
|
||||
paths := viper.GetStringSlice("derp.paths")
|
||||
|
||||
autoUpdate := viper.GetBool("derp.auto_update_enabled")
|
||||
updateFrequency := viper.GetDuration("derp.update_frequency")
|
||||
|
||||
return DERPConfig{
|
||||
ServerEnabled: serverEnabled,
|
||||
ServerRegionID: serverRegionID,
|
||||
ServerRegionCode: serverRegionCode,
|
||||
ServerRegionName: serverRegionName,
|
||||
STUNAddr: stunAddr,
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
func GetLogTailConfig() LogTailConfig {
|
||||
enabled := viper.GetBool("logtail.enabled")
|
||||
|
||||
return LogTailConfig{
|
||||
Enabled: enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func GetACLConfig() ACLConfig {
|
||||
policyPath := viper.GetString("acl_policy_path")
|
||||
|
||||
return ACLConfig{
|
||||
PolicyPath: policyPath,
|
||||
}
|
||||
}
|
||||
|
||||
func GetLogConfig() LogConfig {
|
||||
logLevelStr := viper.GetString("log.level")
|
||||
logLevel, err := zerolog.ParseLevel(logLevelStr)
|
||||
if err != nil {
|
||||
logLevel = zerolog.DebugLevel
|
||||
}
|
||||
|
||||
logFormatOpt := viper.GetString("log.format")
|
||||
var logFormat string
|
||||
switch logFormatOpt {
|
||||
case "json":
|
||||
logFormat = JSONLogFormat
|
||||
case "text":
|
||||
logFormat = TextLogFormat
|
||||
case "":
|
||||
logFormat = TextLogFormat
|
||||
default:
|
||||
log.Error().
|
||||
Str("func", "GetLogConfig").
|
||||
Msgf("Could not parse log format: %s. Valid choices are 'json' or 'text'", logFormatOpt)
|
||||
}
|
||||
|
||||
return LogConfig{
|
||||
Format: logFormat,
|
||||
Level: logLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
if viper.IsSet("dns_config") {
|
||||
dnsConfig := &tailcfg.DNSConfig{}
|
||||
|
||||
overrideLocalDNS := viper.GetBool("dns_config.override_local_dns")
|
||||
|
||||
if viper.IsSet("dns_config.nameservers") {
|
||||
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
|
||||
|
||||
nameservers := make([]netip.Addr, len(nameserversStr))
|
||||
resolvers := make([]*dnstype.Resolver, len(nameserversStr))
|
||||
|
||||
for index, nameserverStr := range nameserversStr {
|
||||
nameserver, err := netip.ParseAddr(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
|
||||
nameservers[index] = nameserver
|
||||
resolvers[index] = &dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
|
||||
dnsConfig.Nameservers = nameservers
|
||||
|
||||
if overrideLocalDNS {
|
||||
dnsConfig.Resolvers = resolvers
|
||||
} else {
|
||||
dnsConfig.FallbackResolvers = resolvers
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.restricted_nameservers") {
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
|
||||
restrictedDNS := viper.GetStringMapStringSlice(
|
||||
"dns_config.restricted_nameservers",
|
||||
)
|
||||
for domain, restrictedNameservers := range restrictedDNS {
|
||||
restrictedResolvers := make(
|
||||
[]*dnstype.Resolver,
|
||||
len(restrictedNameservers),
|
||||
)
|
||||
for index, nameserverStr := range restrictedNameservers {
|
||||
nameserver, err := netip.ParseAddr(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
restrictedResolvers[index] = &dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
dnsConfig.Routes[domain] = restrictedResolvers
|
||||
}
|
||||
} else {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.domains") {
|
||||
domains := viper.GetStringSlice("dns_config.domains")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Domains = domains
|
||||
} else if domains != nil {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.domains is set, but no nameservers are configured. Ignoring domains.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.magic_dns") {
|
||||
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
|
||||
}
|
||||
|
||||
var baseDomain string
|
||||
if viper.IsSet("dns_config.base_domain") {
|
||||
baseDomain = viper.GetString("dns_config.base_domain")
|
||||
} else {
|
||||
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
|
||||
}
|
||||
|
||||
return dnsConfig, baseDomain
|
||||
}
|
||||
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
func GetHeadscaleConfig() (*Config, error) {
|
||||
dnsConfig, baseDomain := GetDNSConfig()
|
||||
derpConfig := GetDERPConfig()
|
||||
logConfig := GetLogTailConfig()
|
||||
randomizeClientPort := viper.GetBool("randomize_client_port")
|
||||
|
||||
configuredPrefixes := viper.GetStringSlice("ip_prefixes")
|
||||
parsedPrefixes := make([]netip.Prefix, 0, len(configuredPrefixes)+1)
|
||||
|
||||
for i, prefixInConfig := range configuredPrefixes {
|
||||
prefix, err := netip.ParsePrefix(prefixInConfig)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, prefix)
|
||||
}
|
||||
|
||||
prefixes := make([]netip.Prefix, 0, len(parsedPrefixes))
|
||||
{
|
||||
// dedup
|
||||
normalizedPrefixes := make(map[string]int, len(parsedPrefixes))
|
||||
for i, p := range parsedPrefixes {
|
||||
normalized, _ := netipx.RangeOfPrefix(p).Prefix()
|
||||
normalizedPrefixes[normalized.String()] = i
|
||||
}
|
||||
|
||||
// convert back to list
|
||||
for _, i := range normalizedPrefixes {
|
||||
prefixes = append(prefixes, parsedPrefixes[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(prefixes) < 1 {
|
||||
prefixes = append(prefixes, netip.MustParsePrefix("100.64.0.0/10"))
|
||||
log.Warn().
|
||||
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
|
||||
}
|
||||
|
||||
return &Config{
|
||||
ServerURL: viper.GetString("server_url"),
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
MetricsAddr: viper.GetString("metrics_listen_addr"),
|
||||
GRPCAddr: viper.GetString("grpc_listen_addr"),
|
||||
GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"),
|
||||
DisableUpdateCheck: viper.GetBool("disable_check_updates"),
|
||||
|
||||
IPPrefixes: prefixes,
|
||||
PrivateKeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("private_key_path"),
|
||||
),
|
||||
NoisePrivateKeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("noise.private_key_path"),
|
||||
),
|
||||
BaseDomain: baseDomain,
|
||||
|
||||
DERP: derpConfig,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration(
|
||||
"ephemeral_node_inactivity_timeout",
|
||||
),
|
||||
|
||||
NodeUpdateCheckInterval: viper.GetDuration(
|
||||
"node_update_check_interval",
|
||||
),
|
||||
|
||||
DBtype: viper.GetString("db_type"),
|
||||
DBpath: AbsolutePathFromConfigPath(viper.GetString("db_path")),
|
||||
DBhost: viper.GetString("db_host"),
|
||||
DBport: viper.GetInt("db_port"),
|
||||
DBname: viper.GetString("db_name"),
|
||||
DBuser: viper.GetString("db_user"),
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
DBssl: viper.GetBool("db_ssl"),
|
||||
|
||||
TLS: GetTLSConfig(),
|
||||
|
||||
DNSConfig: dnsConfig,
|
||||
|
||||
ACMEEmail: viper.GetString("acme_email"),
|
||||
ACMEURL: viper.GetString("acme_url"),
|
||||
|
||||
UnixSocket: viper.GetString("unix_socket"),
|
||||
UnixSocketPermission: GetFileMode("unix_socket_permission"),
|
||||
|
||||
OIDC: OIDCConfig{
|
||||
OnlyStartIfOIDCIsAvailable: viper.GetBool(
|
||||
"oidc.only_start_if_oidc_is_available",
|
||||
),
|
||||
Issuer: viper.GetString("oidc.issuer"),
|
||||
ClientID: viper.GetString("oidc.client_id"),
|
||||
ClientSecret: viper.GetString("oidc.client_secret"),
|
||||
Scope: viper.GetStringSlice("oidc.scope"),
|
||||
ExtraParams: viper.GetStringMapString("oidc.extra_params"),
|
||||
AllowedDomains: viper.GetStringSlice("oidc.allowed_domains"),
|
||||
AllowedUsers: viper.GetStringSlice("oidc.allowed_users"),
|
||||
StripEmaildomain: viper.GetBool("oidc.strip_email_domain"),
|
||||
},
|
||||
|
||||
LogTail: logConfig,
|
||||
RandomizeClientPort: randomizeClientPort,
|
||||
|
||||
CLI: CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
Timeout: viper.GetDuration("cli.timeout"),
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
},
|
||||
|
||||
ACL: GetACLConfig(),
|
||||
|
||||
Log: GetLogConfig(),
|
||||
}, nil
|
||||
}
|
||||
db.go (73 changes)
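The wrapper types touched below (HostInfo, IPPrefixes, StringList) all follow the same pattern: the value is stored as a JSON string column and decoded back into a typed Go value on read. A generic, hedged illustration of that pattern, not headscale's exact code, looks like this:

package example

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
)

// JSONColumn demonstrates the sql.Scanner / driver.Valuer pair used by the
// wrapper types in db.go.
type JSONColumn[T any] struct{ Data T }

func (c *JSONColumn[T]) Scan(src interface{}) error {
	text, ok := src.(string)
	if !ok {
		return errors.New("unexpected data type")
	}

	return json.Unmarshal([]byte(text), &c.Data)
}

func (c JSONColumn[T]) Value() (driver.Value, error) {
	bytes, err := json.Marshal(c.Data)

	return string(bytes), err
}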
@@ -1,10 +1,12 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
@@ -12,7 +14,6 @@ import (
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -39,6 +40,12 @@ func (h *Headscale) initDB() error {
|
||||
}
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "ip_address", "ip_addresses")
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "name", "hostname")
|
||||
|
||||
// GivenName is used as the primary source of DNS names, make sure
|
||||
// the field is populated and normalized if it was not when the
|
||||
// machine was registered.
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "nickname", "given_name")
|
||||
|
||||
// If the Machine table has a column for registered,
|
||||
// find all occurrences of "false" and drop them. Then
|
||||
@@ -54,13 +61,13 @@ func (h *Headscale) initDB() error {
|
||||
|
||||
for _, machine := range machines {
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Deleting unregistered machine")
|
||||
if err := h.db.Delete(&Machine{}, machine.ID).Error; err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Error deleting unregistered machine")
|
||||
}
|
||||
@@ -77,6 +84,38 @@ func (h *Headscale) initDB() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "given_name") {
|
||||
machines := Machines{}
|
||||
if err := h.db.Find(&machines).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for item, machine := range machines {
|
||||
if machine.GivenName == "" {
|
||||
normalizedHostname, err := NormalizeToFQDNRules(
|
||||
machine.Hostname,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to normalize machine hostname in DB migration")
|
||||
}
|
||||
|
||||
err = h.RenameMachine(&machines[item], normalizedHostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to save normalized machine name in DB migration")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&KV{})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -92,6 +131,11 @@ func (h *Headscale) initDB() error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&PreAuthKeyACLTag{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = db.Migrator().DropTable("shared_machines")
|
||||
|
||||
err = db.AutoMigrate(&APIKey{})
|
||||
@@ -175,11 +219,24 @@ func (h *Headscale) setValue(key string, value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
h.db.Create(keyValue)
|
||||
if err := h.db.Create(keyValue).Error; err != nil {
|
||||
return fmt.Errorf("failed to create key value pair in the database: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) pingDB(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
db, err := h.db.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.PingContext(ctx)
|
||||
}
|
||||
|
||||
// This is a "wrapper" type around Tailscale's
// Hostinfo to allow us to add database "serialization"
// methods. This allows us to use typed values throughout
|
||||
@@ -196,7 +253,7 @@ func (hi *HostInfo) Scan(destination interface{}) error {
|
||||
return json.Unmarshal([]byte(value), hi)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", errMachineAddressesInvalid, destination)
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -207,7 +264,7 @@ func (hi HostInfo) Value() (driver.Value, error) {
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
type IPPrefixes []netaddr.IPPrefix
|
||||
type IPPrefixes []netip.Prefix
|
||||
|
||||
func (i *IPPrefixes) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
@@ -218,7 +275,7 @@ func (i *IPPrefixes) Scan(destination interface{}) error {
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", errMachineAddressesInvalid, destination)
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,7 +297,7 @@ func (i *StringList) Scan(destination interface{}) error {
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", errMachineAddressesInvalid, destination)
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
derp.go (16 changes)
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -35,7 +34,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), HTTPReadTimeout)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", addr.String(), nil)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -50,7 +49,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -152,16 +151,7 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
|
||||
h.DERPMap.Regions[h.DERPServer.region.RegionID] = &h.DERPServer.region
|
||||
}
|
||||
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Failed to fetch namespaces")
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
}
|
||||
h.setLastStateChangeToNow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
derp_server.go (113 changes)
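The handlers in this file are rewritten below from gin contexts to plain net/http signatures. As a hedged sketch (the real route registration lives elsewhere in headscale, and the paths shown are illustrative), handlers with these signatures mount onto a standard mux like so:

package example

import "net/http"

// derpHandlers is the subset of handler methods this sketch assumes.
type derpHandlers interface {
	DERPHandler(http.ResponseWriter, *http.Request)
	DERPProbeHandler(http.ResponseWriter, *http.Request)
	DERPBootstrapDNSHandler(http.ResponseWriter, *http.Request)
}

// registerDERPHandlers wires the rewritten handlers into an http.ServeMux.
func registerDERPHandlers(mux *http.ServeMux, h derpHandlers) {
	mux.HandleFunc("/derp", h.DERPHandler)
	mux.HandleFunc("/derp/probe", h.DERPProbeHandler)
	mux.HandleFunc("/bootstrap-dns", h.DERPBootstrapDNSHandler)
}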
@@ -2,15 +2,16 @@ package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/net/stun"
|
||||
@@ -30,6 +31,7 @@ type DERPServer struct {
|
||||
}
|
||||
|
||||
func (h *Headscale) NewDERPServer() (*DERPServer, error) {
|
||||
log.Trace().Caller().Msg("Creating new embedded DERP server")
|
||||
server := derp.NewServer(key.NodePrivate(*h.privateKey), log.Info().Msgf)
|
||||
region, err := h.generateRegionLocalDERP()
|
||||
if err != nil {
|
||||
@@ -87,27 +89,51 @@ func (h *Headscale) generateRegionLocalDERP() (tailcfg.DERPRegion, error) {
|
||||
}
|
||||
localDERPregion.Nodes[0].STUNPort = portSTUN
|
||||
|
||||
log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
|
||||
|
||||
return localDERPregion, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) DERPHandler(ctx *gin.Context) {
|
||||
log.Trace().Caller().Msgf("/derp request from %v", ctx.ClientIP())
|
||||
up := strings.ToLower(ctx.Request.Header.Get("Upgrade"))
|
||||
if up != "websocket" && up != "derp" {
|
||||
if up != "" {
|
||||
log.Warn().Caller().Msgf("Weird websockets connection upgrade: %q", up)
|
||||
func (h *Headscale) DERPHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().Caller().Msgf("/derp request from %v", req.RemoteAddr)
|
||||
upgrade := strings.ToLower(req.Header.Get("Upgrade"))
|
||||
|
||||
if upgrade != "websocket" && upgrade != "derp" {
|
||||
if upgrade != "" {
|
||||
log.Warn().
|
||||
Caller().
|
||||
Msg("No Upgrade header in DERP server request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
|
||||
}
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusUpgradeRequired)
|
||||
_, err := writer.Write([]byte("DERP requires connection upgrade"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
ctx.String(http.StatusUpgradeRequired, "DERP requires connection upgrade")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
fastStart := ctx.Request.Header.Get(fastStartHeader) == "1"
|
||||
fastStart := req.Header.Get(fastStartHeader) == "1"
|
||||
|
||||
hijacker, ok := ctx.Writer.(http.Hijacker)
|
||||
hijacker, ok := writer.(http.Hijacker)
|
||||
if !ok {
|
||||
log.Error().Caller().Msg("DERP requires Hijacker interface from Gin")
|
||||
ctx.String(http.StatusInternalServerError, "HTTP does not support general TCP support")
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := writer.Write([]byte("HTTP does not support general TCP support"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
@@ -115,14 +141,23 @@ func (h *Headscale) DERPHandler(ctx *gin.Context) {
|
||||
netConn, conn, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msgf("Hijack failed")
|
||||
ctx.String(http.StatusInternalServerError, "HTTP does not support general TCP support")
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err = writer.Write([]byte("HTTP does not support general TCP support"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
log.Trace().Caller().Msgf("Hijacked connection from %v", req.RemoteAddr)
|
||||
|
||||
if !fastStart {
|
||||
pubKey := h.privateKey.Public()
|
||||
pubKeyStr := pubKey.UntypedHexString() // nolint
|
||||
pubKeyStr := pubKey.UntypedHexString() //nolint
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
|
||||
"Upgrade: DERP\r\n"+
|
||||
"Connection: Upgrade\r\n"+
|
||||
@@ -132,17 +167,28 @@ func (h *Headscale) DERPHandler(ctx *gin.Context) {
|
||||
pubKeyStr)
|
||||
}
|
||||
|
||||
h.DERPServer.tailscaleDERP.Accept(netConn, conn, netConn.RemoteAddr().String())
|
||||
h.DERPServer.tailscaleDERP.Accept(req.Context(), netConn, conn, netConn.RemoteAddr().String())
|
||||
}
|
||||
|
||||
// DERPProbeHandler is the endpoint that js/wasm clients hit to measure
|
||||
// DERP latency, since they can't do UDP STUN queries.
|
||||
func (h *Headscale) DERPProbeHandler(ctx *gin.Context) {
|
||||
switch ctx.Request.Method {
|
||||
case "HEAD", "GET":
|
||||
ctx.Writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
func (h *Headscale) DERPProbeHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
switch req.Method {
|
||||
case http.MethodHead, http.MethodGet:
|
||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
ctx.String(http.StatusMethodNotAllowed, "bogus probe method")
|
||||
writer.WriteHeader(http.StatusMethodNotAllowed)
|
||||
_, err := writer.Write([]byte("bogus probe method"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,24 +199,38 @@ func (h *Headscale) DERPProbeHandler(ctx *gin.Context) {
|
||||
// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
|
||||
// They have a cache, but not clear if that is really necessary at Headscale, uh, scale.
|
||||
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
|
||||
func (h *Headscale) DERPBootstrapDNSHandler(ctx *gin.Context) {
|
||||
func (h *Headscale) DERPBootstrapDNSHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
dnsEntries := make(map[string][]net.IP)
|
||||
|
||||
resolvCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
|
||||
defer cancel()
|
||||
var r net.Resolver
|
||||
var resolver net.Resolver
|
||||
for _, region := range h.DERPMap.Regions {
|
||||
for _, node := range region.Nodes { // we don't care if we override some nodes
|
||||
addrs, err := r.LookupIP(resolvCtx, "ip", node.HostName)
|
||||
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("bootstrap DNS lookup failed %q", node.HostName)
|
||||
log.Trace().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msgf("bootstrap DNS lookup failed %q", node.HostName)
|
||||
|
||||
continue
|
||||
}
|
||||
dnsEntries[node.HostName] = addrs
|
||||
}
|
||||
}
|
||||
ctx.JSON(http.StatusOK, dnsEntries)
|
||||
writer.Header().Set("Content-Type", "application/json")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
err := json.NewEncoder(writer).Encode(dnsEntries)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
// ServeSTUN starts a STUN server on the configured addr.
|
||||
@@ -220,7 +280,8 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
|
||||
continue
|
||||
}
|
||||
|
||||
res := stun.Response(txid, udpAddr.IP, uint16(udpAddr.Port))
|
||||
addr, _ := netip.AddrFromSlice(udpAddr.IP)
|
||||
res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port)))
|
||||
_, err = packetConn.WriteTo(res, udpAddr)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("Issue writing to UDP")
|
||||
|
||||
dns.go (33 changes)
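Before the diff, a worked example of what the prefix-to-reverse-DNS mapping below produces: the default 100.64.0.0/10 prefix (the one used by TestMagicDNSRootDomains100 in dns_test.go) has six wildcard bits in its second octet, so the generated MagicDNS root domains run from 64.100.in-addr.arpa through 127.100.in-addr.arpa. A small sketch of that arithmetic, independent of the headscale internals:

package example

import "fmt"

// rootDomainsForSlash10 illustrates the IPv4 case: a /10 leaves 2^6 = 64
// class-B sized blocks, each of which gets its own in-addr.arpa root domain.
func rootDomainsForSlash10() []string {
	domains := make([]string, 0, 64)
	for octet := 64; octet <= 127; octet++ {
		domains = append(domains, fmt.Sprintf("%d.100.in-addr.arpa.", octet))
	}

	return domains
}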
@@ -2,10 +2,11 @@ package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/set"
|
||||
"inet.af/netaddr"
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/dnsname"
|
||||
)
|
||||
@@ -39,11 +40,11 @@ const (
|
||||
|
||||
// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).
|
||||
// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netip.Prefix) []dnsname.FQDN {
|
||||
fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes))
|
||||
for _, ipPrefix := range ipPrefixes {
|
||||
var generateDNSRoot func(netaddr.IPPrefix) []dnsname.FQDN
|
||||
switch ipPrefix.IP().BitLen() {
|
||||
var generateDNSRoot func(netip.Prefix) []dnsname.FQDN
|
||||
switch ipPrefix.Addr().BitLen() {
|
||||
case ipv4AddressLength:
|
||||
generateDNSRoot = generateIPv4DNSRootDomain
|
||||
|
||||
@@ -54,7 +55,7 @@ func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
panic(
|
||||
fmt.Sprintf(
|
||||
"unsupported IP version with address length %d",
|
||||
ipPrefix.IP().BitLen(),
|
||||
ipPrefix.Addr().BitLen(),
|
||||
),
|
||||
)
|
||||
}
|
||||
@@ -65,9 +66,9 @@ func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
func generateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
|
||||
// Conversion to the std lib net.IPnet, a bit easier to operate
|
||||
netRange := ipPrefix.IPNet()
|
||||
netRange := netipx.PrefixIPNet(ipPrefix)
|
||||
maskBits, _ := netRange.Mask.Size()
|
||||
|
||||
// lastOctet is the last IP byte covered by the mask
|
||||
@@ -101,11 +102,11 @@ func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv6DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
|
||||
const nibbleLen = 4
|
||||
|
||||
maskBits, _ := ipPrefix.IPNet().Mask.Size()
|
||||
expanded := ipPrefix.IP().StringExpanded()
|
||||
maskBits, _ := netipx.PrefixIPNet(ipPrefix).Mask.Size()
|
||||
expanded := ipPrefix.Addr().StringExpanded()
|
||||
nibbleStr := strings.Map(func(r rune) rune {
|
||||
if r == ':' {
|
||||
return -1
|
||||
@@ -170,18 +171,12 @@ func getMapResponseDNSConfig(
|
||||
),
|
||||
)
|
||||
|
||||
namespaceSet := set.New(set.ThreadSafe)
|
||||
namespaceSet := mapset.NewSet[Namespace]()
|
||||
namespaceSet.Add(machine.Namespace)
|
||||
for _, p := range peers {
|
||||
namespaceSet.Add(p.Namespace)
|
||||
}
|
||||
for _, ns := range namespaceSet.List() {
|
||||
namespace, ok := ns.(Namespace)
|
||||
if !ok {
|
||||
dnsConfig = dnsConfigOrig
|
||||
|
||||
continue
|
||||
}
|
||||
for _, namespace := range namespaceSet.ToSlice() {
|
||||
dnsRoute := fmt.Sprintf("%v.%v", namespace.Name, baseDomain)
|
||||
dnsConfig.Routes[dnsRoute] = nil
|
||||
}
|
||||
|
||||
dns_test.go (74 changes)
@@ -2,16 +2,16 @@ package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("100.64.0.0/10"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("100.64.0.0/10"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -47,8 +47,8 @@ func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("172.16.0.0/16"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -75,8 +75,8 @@ func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
|
||||
// Happens when netmask is a multiple of 4 bits (sounds likely).
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -89,8 +89,8 @@ func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6SingleMultiple(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/50"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("fd7a:115c:a1e0::/50"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -126,6 +126,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -134,6 +135,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -142,6 +144,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -150,6 +153,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -161,16 +165,16 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
Hostname: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
@@ -178,16 +182,16 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
Hostname: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
@@ -195,16 +199,16 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
Hostname: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
@@ -212,18 +216,18 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
Hostname: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Routes: make(map[string][]*dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: true,
|
||||
}
|
||||
@@ -269,6 +273,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -277,6 +282,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -285,6 +291,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -293,6 +300,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -304,16 +312,16 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
Hostname: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
@@ -321,16 +329,16 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
Hostname: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
@@ -338,16 +346,16 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
Hostname: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
@@ -355,18 +363,18 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
Hostname: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)

baseDomain := "foobar.headscale.net"
dnsConfigOrig := tailcfg.DNSConfig{
Routes: make(map[string][]dnstype.Resolver),
Routes: make(map[string][]*dnstype.Resolver),
Domains: []string{baseDomain},
Proxied: false,
}

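The test changes above track two mechanical migrations: the archived `inet.af/netaddr` package is replaced by the standard library's `net/netip`, and the `Name` field on `Machine` becomes `Hostname`. As a reference for the type mapping, here is a small self-contained Go sketch (illustrative only, not part of the diff):

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Equivalents used throughout the migrated tests:
	//   netaddr.IP                -> netip.Addr
	//   netaddr.IPPrefix          -> netip.Prefix
	//   netaddr.MustParseIP       -> netip.MustParseAddr
	//   netaddr.MustParseIPPrefix -> netip.MustParsePrefix
	addr := netip.MustParseAddr("100.64.0.1")
	prefix := netip.MustParsePrefix("fd7a:115c:a1e0::/48")

	fmt.Println(addr.Is4())                                                // true
	fmt.Println(prefix.Contains(netip.MustParseAddr("fd7a:115c:a1e0::1"))) // true
}
```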
@@ -27,6 +27,8 @@ written by community members. It is _not_ verified by `headscale` developers.
**It might be outdated and it might miss necessary steps**.

- [Running headscale in a container](running-headscale-container.md)
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
- [Running headscale behind a reverse proxy](reverse-proxy.md)

## Misc

103 docs/acls.md
@@ -5,12 +5,16 @@ ACL's are the most useful).

We have a small company with a boss, an admin, two developers and an intern.

The boss should have access to all servers but not to the users hosts. Admin
The boss should have access to all servers but not to the user's hosts. Admin
should also have access to all hosts except that their permissions should be
limited to maintaining the hosts (for example purposes). The developers can do
anything they want on dev hosts, but only watch on productions hosts. Intern
anything they want on dev hosts but only watch on productions hosts. Intern
can only interact with the development servers.

There's an additional server that acts as a router, connecting the VPN users
to an internal network `10.20.0.0/16`. Developers must have access to those
internal resources.

Each user has at least a device connected to the network and we have some
servers.

@@ -19,23 +23,20 @@ servers.
- app-server1.prod
- app-server1.dev
- billing.internal
- router.internal

## Setup of the network
![ACL implementation example](images/headscale-acl-network.png)

Let's create the namespaces. Each user should have his own namespace. The users
here are represented as namespaces.
## ACL setup

```bash
headscale namespaces create boss
headscale namespaces create admin1
headscale namespaces create dev1
headscale namespaces create dev2
headscale namespaces create intern1
```
Note: Namespaces will be created automatically when users authenticate with the
Headscale server.

We don't need to create namespaces for the servers because the servers will be
tagged. When registering the servers we will need to add the flag
`--advertised-tags=tag:<tag1>,tag:<tag2>`, and the user (namespace) that is
ACLs can be written either in [huJSON](https://github.com/tailscale/hujson)
or YAML. Check the [test ACLs](../tests/acls) for further information.

When registering the servers we will need to add the flag
`--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user (namespace) that is
registering the server should be allowed to do it. Since anyone can add tags to
a server they can register, the check of the tags is done on the headscale server
and only valid tags are applied. A tag is valid if the namespace that is

@@ -70,12 +71,20 @@ Here are the ACL's to implement the same permissions as above:

// interns cannot add servers
},
// hosts should be defined using its IP addresses and a subnet mask.
// to define a single host, use a /32 mask. You cannot use DNS entries here,
// as they're prone to be hijacked by replacing their IP addresses.
// see https://github.com/tailscale/tailscale/issues/3800 for more information.
"Hosts": {
"postgresql.internal": "10.20.0.2/32",
"webservers.internal": "10.20.10.1/29"
},
"acls": [
// boss have access to all servers
{
"action": "accept",
"users": ["group:boss"],
"ports": [
"src": ["group:boss"],
"dst": [
"tag:prod-databases:*",
"tag:prod-app-servers:*",
"tag:internal:*",

@@ -84,11 +93,12 @@ Here are the ACL's to implement the same permissions as above:
]
},

// admin have only access to administrative ports of the servers
// admin have only access to administrative ports of the servers, in tcp/22
{
"action": "accept",
"users": ["group:admin"],
"ports": [
"src": ["group:admin"],
"proto": "tcp",
"dst": [
"tag:prod-databases:22",
"tag:prod-app-servers:22",
"tag:internal:22",

@@ -97,45 +107,70 @@ Here are the ACL's to implement the same permissions as above:
]
},

// we also allow admin to ping the servers
{
"action": "accept",
"src": ["group:admin"],
"proto": "icmp",
"dst": [
"tag:prod-databases:*",
"tag:prod-app-servers:*",
"tag:internal:*",
"tag:dev-databases:*",
"tag:dev-app-servers:*"
]
},

// developers have access to databases servers and application servers on all ports
// they can only view the applications servers in prod and have no access to databases servers in production
{
"action": "accept",
"users": ["group:dev"],
"ports": [
"src": ["group:dev"],
"dst": [
"tag:dev-databases:*",
"tag:dev-app-servers:*",
"tag:prod-app-servers:80,443"
]
},
// developers have access to the internal network through the router.
// the internal network is composed of HTTPS endpoints and Postgresql
// database servers. There's an additional rule to allow traffic to be
// forwarded to the internal subnet, 10.20.0.0/16. See this issue
// https://github.com/juanfont/headscale/issues/502
{
"action": "accept",
"src": ["group:dev"],
"dst": ["10.20.0.0/16:443,5432", "router.internal:0"]
},

// servers should be able to talk to database. Database should not be able to initiate connections to
// servers should be able to talk to database in tcp/5432. Database should not be able to initiate connections to
// applications servers
{
"action": "accept",
"users": ["tag:dev-app-servers"],
"ports": ["tag:dev-databases:5432"]
"src": ["tag:dev-app-servers"],
"proto": "tcp",
"dst": ["tag:dev-databases:5432"]
},
{
"action": "accept",
"users": ["tag:prod-app-servers"],
"ports": ["tag:prod-databases:5432"]
"src": ["tag:prod-app-servers"],
"dst": ["tag:prod-databases:5432"]
},

// interns have access to dev-app-servers only in reading mode
{
"action": "accept",
"users": ["group:intern"],
"ports": ["tag:dev-app-servers:80,443"]
"src": ["group:intern"],
"dst": ["tag:dev-app-servers:80,443"]
},

// We still have to allow internal namespaces communications since nothing guarantees that each user have
// their own namespaces.
{ "action": "accept", "users": ["boss"], "ports": ["boss:*"] },
{ "action": "accept", "users": ["dev1"], "ports": ["dev1:*"] },
{ "action": "accept", "users": ["dev2"], "ports": ["dev2:*"] },
{ "action": "accept", "users": ["admin1"], "ports": ["admin1:*"] },
{ "action": "accept", "users": ["intern1"], "ports": ["intern1:*"] }
{ "action": "accept", "src": ["boss"], "dst": ["boss:*"] },
{ "action": "accept", "src": ["dev1"], "dst": ["dev1:*"] },
{ "action": "accept", "src": ["dev2"], "dst": ["dev2:*"] },
{ "action": "accept", "src": ["admin1"], "dst": ["admin1:*"] },
{ "action": "accept", "src": ["intern1"], "dst": ["intern1:*"] }
]
}
```

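As a side note on the `Hosts` rules above: entries must be IP prefixes, with a `/32` mask marking a single host. The following Go sketch (illustrative only, not code from the repository) shows how such entries parse, and why a DNS name would be rejected:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// The same host entries as in the ACL example above, plus a DNS name
	// to show the failure mode.
	hosts := map[string]string{
		"postgresql.internal": "10.20.0.2/32",
		"webservers.internal": "10.20.10.1/29",
		"bad.example":         "db.internal", // a DNS name, rejected below
	}

	for name, entry := range hosts {
		prefix, err := netip.ParsePrefix(entry)
		if err != nil {
			fmt.Printf("%-20s invalid prefix: %v\n", name, err)
			continue
		}
		fmt.Printf("%-20s %s (single host: %v)\n", name, prefix, prefix.IsSingleIP())
	}
}
```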
19 docs/android-client.md (new file)
@@ -0,0 +1,19 @@
# Connecting an Android client

## Goal

This documentation has the goal of showing how a user can use the official Android [Tailscale](https://tailscale.com) client with `headscale`.

## Installation

Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).

Ensure that the installed version is at least 1.30.0, as that is the first release to support custom URLs.

## Configuring the headscale URL

After opening the app, the kebab menu icon (three dots) on the top bar on the right must be repeatedly opened and closed until the _Change server_ option appears in the menu. This is where you can enter your headscale URL.

A screen recording of this process can be seen in the `tailscale-android` PR which implemented this functionality: <https://github.com/tailscale/tailscale-android/pull/55>

After saving and restarting the app, selecting the regular _Sign in_ option (non-SSO) should open up the headscale authentication page.

BIN docs/images/headscale-acl-network.png (new binary file, 56 KiB, not shown)
BIN docs/logo/headscale3-dots.pdf (new binary file, not shown)
BIN docs/logo/headscale3-dots.png (new binary file, 34 KiB, not shown)
BIN docs/logo/headscale3_header_stacked_left.pdf (new binary file, not shown)
BIN docs/logo/headscale3_header_stacked_left.png (new binary file, 49 KiB, not shown)
@@ -24,7 +24,7 @@ To create a API key, log into your `headscale` server and generate a key:
headscale apikeys create --expiration 90d
```

Copy the output of the command and save it for later. Please not that you can not retrieve a key again,
Copy the output of the command and save it for later. Please note that you can not retrieve a key again,
if the key is lost, expire the old one, and create a new key.

To list the keys currently associated with the server:

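To show how such an API key is consumed outside the `headscale` CLI, here is a hedged Go sketch that lists keys over the HTTP API. The `/api/v1/apikey` path matches the gRPC-gateway annotations elsewhere in this comparison; the Bearer-token header and the environment variable names are assumptions made for the example.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// Assumed environment: HEADSCALE_URL like "https://headscale.example.com"
	// and HEADSCALE_API_KEY holding a key from `headscale apikeys create`.
	server := os.Getenv("HEADSCALE_URL")
	apiKey := os.Getenv("HEADSCALE_API_KEY")

	req, err := http.NewRequest(http.MethodGet, server+"/api/v1/apikey", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Assumption: the REST gateway accepts the API key as a Bearer token.
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```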
100 docs/reverse-proxy.md (new file)
@@ -0,0 +1,100 @@
# Running headscale behind a reverse proxy

Running headscale behind a reverse proxy is useful when running multiple applications on the same server, and you want to reuse the same external IP and port - usually tcp/443 for HTTPS.

### WebSockets

The reverse proxy MUST be configured to support WebSockets, as it is needed for clients running Tailscale v1.30+.

WebSockets support is required when using the headscale embedded DERP server. In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml).

### TLS

Headscale can be configured not to use TLS, leaving it to the reverse proxy to handle. Add the following configuration values to your headscale config file.

```yaml
server_url: https://<YOUR_SERVER_NAME> # This should be the FQDN at which headscale will be served
listen_addr: 0.0.0.0:8080
metrics_listen_addr: 0.0.0.0:9090
tls_cert_path: ""
tls_key_path: ""
```

## nginx

The following example configuration can be used in your nginx setup, substituting values as necessary. `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `http://localhost:8080`.

```Nginx
map $http_upgrade $connection_upgrade {
    default      keep-alive;
    'websocket'  upgrade;
    ''           close;
}

server {
    listen 80;
    listen [::]:80;

    listen 443 ssl http2;
    listen [::]:443 ssl http2;

    server_name <YOUR_SERVER_NAME>;

    ssl_certificate <PATH_TO_CERT>;
    ssl_certificate_key <PATH_CERT_KEY>;
    ssl_protocols TLSv1.2 TLSv1.3;

    location / {
        proxy_pass http://<IP:PORT>;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $server_name;
        proxy_redirect http:// https://;
        proxy_buffering off;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
        add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
    }
}
```

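For readers who want to see what the WebSocket-related nginx directives accomplish, here is a hedged Go sketch (an illustration, not a supported deployment method) of a minimal reverse proxy in front of headscale. `net/http/httputil`'s `ReverseProxy` forwards `Upgrade: websocket` requests since Go 1.12, which is what the `map`/`proxy_set_header` lines above configure by hand; the listen address and upstream URL are placeholders.

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Placeholder upstream: a headscale instance on localhost:8080.
	upstream, err := url.Parse("http://127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}

	// ReverseProxy handles the hop-by-hop Upgrade/Connection headers for
	// WebSocket requests, so Tailscale v1.30+ clients can reach headscale.
	proxy := httputil.NewSingleHostReverseProxy(upstream)

	log.Fatal(http.ListenAndServe(":8081", proxy))
}
```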
## istio/envoy

If you are using the [Istio](https://istio.io/) ingress gateway or [Envoy](https://www.envoyproxy.io/) as a reverse proxy, there are some tips for you. If the upgrade type is not set, you may see debug logs in the proxy like the one below:

```log
Sending local reply with details upgrade_failed
```

### Envoy

You need to add a new upgrade_type named `tailscale-control-protocol`. [See the details](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-upgradeconfig).

### Istio

As with Envoy, we can use an `EnvoyFilter` to add the upgrade_type.

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
  name: headscale-behind-istio-ingress
  namespace: istio-system
spec:
  configPatches:
    - applyTo: NETWORK_FILTER
      match:
        listener:
          filterChain:
            filter:
              name: envoy.filters.network.http_connection_manager
      patch:
        operation: MERGE
        value:
          typed_config:
            "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
            upgrade_configs:
              - upgrade_type: tailscale-control-protocol
```

@@ -48,11 +48,15 @@ Modify the config file to your preferences before launching Docker container.
Here are some settings that you likely want:

```yaml
server_url: http://your-host-name:8080 # Change to your hostname or host IP
# Change to your hostname or host IP
server_url: http://your-host-name:8080
# Listen to 0.0.0.0 so it's accessible outside the container
metrics_listen_addr: 0.0.0.0:9090
# The default /var/lib/headscale path is not writable in the container
private_key_path: /etc/headscale/private.key
# The default /var/lib/headscale path is not writable in the container
noise:
  private_key_path: /etc/headscale/noise_private.key
# The default /var/lib/headscale path is not writable in the container
db_path: /etc/headscale/db.sqlite
```

@@ -63,7 +67,6 @@ db_path: /etc/headscale/db.sqlite
docker run \
  --name headscale \
  --detach \
  --rm \
  --volume $(pwd)/config:/etc/headscale/ \
  --publish 127.0.0.1:8080:8080 \
  --publish 127.0.0.1:9090:9090 \

206 docs/running-headscale-openbsd.md (new file)
@@ -0,0 +1,206 @@
# Running headscale on OpenBSD

## Goal

This documentation has the goal of showing a user how to install and run `headscale` on OpenBSD 7.1.
In addition to the "get up and running" section, there is an optional [rc.d section](#running-headscale-in-the-background-with-rcd)
describing how to make `headscale` run properly in a server environment.

## Install `headscale`

1. Install from ports (Not Recommended)

As of OpenBSD 7.1, there is a headscale package in the ports collection; however, it is severely outdated (v0.12.4).
You can install it via `pkg_add headscale`.

2. Install from source on OpenBSD 7.1

```shell
# Install prerequisites
# 1. go v1.19+: headscale newer than 0.17 needs go 1.19+ to compile
# 2. gmake: Makefile in the headscale repo is written in GNU make syntax
pkg_add -D snap go
pkg_add gmake

git clone https://github.com/juanfont/headscale.git

cd headscale

# optionally checkout a release
# option a. you can find the official release at https://github.com/juanfont/headscale/releases/latest
# option b. get the latest tag, this may be a beta release
latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)

git checkout $latestTag

gmake build

# make it executable
chmod a+x headscale

# copy it to /usr/local/sbin
cp headscale /usr/local/sbin
```

3. Install from source via cross compile

```shell
# Install prerequisites
# 1. go v1.19+: headscale newer than 0.17 needs go 1.19+ to compile
# 2. gmake: Makefile in the headscale repo is written in GNU make syntax

git clone https://github.com/juanfont/headscale.git

cd headscale

# optionally checkout a release
# option a. you can find the official release at https://github.com/juanfont/headscale/releases/latest
# option b. get the latest tag, this may be a beta release
latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)

git checkout $latestTag

make build GOOS=openbsd

# copy headscale to the openbsd machine and put it in /usr/local/sbin
```

## Configure and run `headscale`

1. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:

```shell
# Directory for configuration
mkdir -p /etc/headscale

# Directory for database and other variable data (like certificates)
mkdir -p /var/lib/headscale
```

2. Create an empty SQLite database:

```shell
touch /var/lib/headscale/db.sqlite
```

3. Create a `headscale` configuration:

```shell
touch /etc/headscale/config.yaml
```

It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
from the [headscale repository](../).

4. Start the headscale server:

```shell
headscale serve
```

This command will start `headscale` in the current terminal session.

---

To continue the tutorial, open a new terminal and let it run in the background.
Alternatively, use a terminal multiplexer like [tmux](https://github.com/tmux/tmux).

To run `headscale` in the background, please follow the steps in the [rc.d section](#running-headscale-in-the-background-with-rcd) before continuing.

5. Verify `headscale` is running:

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

6. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

```shell
headscale namespaces create myfirstnamespace
```

### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:

```shell
tailscale up --login-server YOUR_HEADSCALE_URL
```

Register the machine:

```shell
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
```

### Register a machine using a pre-authenticated key

Generate a key using the command line:

```shell
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

## Running `headscale` in the background with rc.d

This section demonstrates how to run `headscale` as a service in the background with [rc.d](https://man.openbsd.org/rc.d).

1. Create an rc.d service at `/etc/rc.d/headscale` containing:

```shell
#!/bin/ksh

daemon="/usr/local/sbin/headscale"
daemon_logger="daemon.info"
daemon_user="root"
daemon_flags="serve"
daemon_timeout=60

. /etc/rc.d/rc.subr

rc_bg=YES
rc_reload=NO

rc_cmd $1
```

2. `/etc/rc.d/headscale` needs execute permission:

```shell
chmod a+x /etc/rc.d/headscale
```

3. Start the `headscale` service:

```shell
rcctl start headscale
```

4. Make the `headscale` service start at boot:

```shell
rcctl enable headscale
```

5. Verify the headscale service:

```shell
rcctl check headscale
```

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

`headscale` will now run in the background and start at boot.

19 flake.lock (generated)
@@ -2,11 +2,11 @@
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"locked": {
|
||||
"lastModified": 1644229661,
|
||||
"narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=",
|
||||
"lastModified": 1659877975,
|
||||
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797",
|
||||
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -17,17 +17,18 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1647536224,
|
||||
"narHash": "sha256-SUIiz4DhMXgM7i+hvFWmLnhywr1WeRGIz+EIbwQQguM=",
|
||||
"lastModified": 1666869603,
|
||||
"narHash": "sha256-3V53or4Vpu4+LrGfGSh3T2V8+qf5RP6nRuex9GywkwE=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "dd8cebebbf0f9352501f251ac37b851d947f92dc",
|
||||
"rev": "2001e2b31c565bcdf7bc13062b8d7cfccaca05b8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "master",
|
||||
"type": "indirect"
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
|
||||
291 flake.nix
@@ -2,147 +2,178 @@
|
||||
description = "headscale - Open Source Tailscale Control server";
|
||||
|
||||
inputs = {
|
||||
# TODO: Use unstable when Go 1.18 has made it in
|
||||
# https://nixpk.gs/pr-tracker.html?pr=164292
|
||||
# nixpkgs.url = "nixpkgs/nixpkgs-unstable";
|
||||
nixpkgs.url = "nixpkgs/master";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, flake-utils, ... }:
|
||||
let
|
||||
headscaleVersion = if (self ? shortRev) then self.shortRev else "dev";
|
||||
in
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
...
|
||||
}: let
|
||||
headscaleVersion =
|
||||
if (self ? shortRev)
|
||||
then self.shortRev
|
||||
else "dev";
|
||||
in
|
||||
{
|
||||
overlay = final: prev:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
in
|
||||
rec {
|
||||
golines =
|
||||
pkgs.buildGoModule rec {
|
||||
pname = "golines";
|
||||
version = "0.9.0";
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
in rec {
|
||||
headscale = pkgs.buildGo119Module rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "segmentio";
|
||||
repo = "golines";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-BUXEg+4r9L/gqe4DhTlhN55P3jWt7ZyWFQycO6QePrw=";
|
||||
};
|
||||
tags = ["ts2019"];
|
||||
|
||||
vendorSha256 = "sha256-sEzWUeVk5GB0H41wrp12P8sBWRjg0FHUX6ABDEEBqK8=";
|
||||
# Only run unit tests when testing a build
|
||||
checkFlags = ["-short"];
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
};
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorSha256 = "sha256-Cq0WipTQ+kGcvnfP0kjyvjyonl2OC9W7Tj0MCuB1lDU=";
|
||||
|
||||
protoc-gen-grpc-gateway =
|
||||
pkgs.buildGoModule rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.8.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-8eBBBYJ+tBjB2fgPMX/ZlbN3eeS75e8TAZYOKXs6hcg=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-AW2Gn/mlZyLMwF+NpK59eiOmQrYWW/9HPjbunYc9Ij4=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
subPackages = [ "protoc-gen-grpc-gateway" "protoc-gen-openapiv2" ];
|
||||
};
|
||||
|
||||
headscale =
|
||||
pkgs.buildGo118Module rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorSha256 = "sha256-VsMhgAP0YY6oo/iW7UXg6jc/rv5oZLSkluQ12TKsXXs=";
|
||||
|
||||
ldflags = [ "-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}" ];
|
||||
};
|
||||
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
|
||||
};
|
||||
} // flake-utils.lib.eachDefaultSystem
|
||||
(system:
|
||||
let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [ self.overlay ];
|
||||
inherit system;
|
||||
|
||||
golines = pkgs.buildGoModule rec {
|
||||
pname = "golines";
|
||||
version = "0.11.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "segmentio";
|
||||
repo = "golines";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
|
||||
};
|
||||
buildDeps = with pkgs; [ git go_1_18 gnumake ];
|
||||
devDeps = with pkgs;
|
||||
buildDeps ++ [
|
||||
|
||||
vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
|
||||
|
||||
nativeBuildInputs = [pkgs.installShellFiles];
|
||||
};
|
||||
|
||||
golangci-lint = prev.golangci-lint.override {
|
||||
# Override https://github.com/NixOS/nixpkgs/pull/166801 which changed this
|
||||
# to buildGo118Module because it does not build on Darwin.
|
||||
inherit (prev) buildGoModule;
|
||||
};
|
||||
|
||||
# golangci-lint =
|
||||
# pkgs.buildGo117Module rec {
|
||||
# pname = "golangci-lint";
|
||||
# version = "1.46.2";
|
||||
#
|
||||
# src = pkgs.fetchFromGitHub {
|
||||
# owner = "golangci";
|
||||
# repo = "golangci-lint";
|
||||
# rev = "v${version}";
|
||||
# sha256 = "sha256-7sDAwWz+qoB/ngeH35tsJ5FZUfAQvQsU6kU9rUHIHMk=";
|
||||
# };
|
||||
#
|
||||
# vendorSha256 = "sha256-w38OKN6HPoz37utG/2QSPMai55IRDXCIIymeMe6ogIU=";
|
||||
#
|
||||
# nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
# };
|
||||
|
||||
protoc-gen-grpc-gateway = pkgs.buildGoModule rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.8.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-8eBBBYJ+tBjB2fgPMX/ZlbN3eeS75e8TAZYOKXs6hcg=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-AW2Gn/mlZyLMwF+NpK59eiOmQrYWW/9HPjbunYc9Ij4=";
|
||||
|
||||
nativeBuildInputs = [pkgs.installShellFiles];
|
||||
|
||||
subPackages = ["protoc-gen-grpc-gateway" "protoc-gen-openapiv2"];
|
||||
};
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
(system: let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_19 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golines
|
||||
nodePackages.prettier
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [pkgs.headscale];
|
||||
config.Entrypoint = [(pkgs.headscale + "/bin/headscale")];
|
||||
};
|
||||
in rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell {
|
||||
buildInputs = devDeps;
|
||||
|
||||
shellHook = ''
|
||||
export GOFLAGS=-tags="ts2019"
|
||||
'';
|
||||
};
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
defaultApp = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format =
|
||||
pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
golines
|
||||
nodePackages.prettier
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
golines
|
||||
clang-tools
|
||||
];
|
||||
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [ pkgs.headscale ];
|
||||
config.Entrypoint = [ (pkgs.headscale + "/bin/headscale") ];
|
||||
};
|
||||
in
|
||||
rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell { buildInputs = devDeps; };
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
defaultApp = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format = pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
nodePackages.prettier
|
||||
golines
|
||||
clang-tools
|
||||
];
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
});
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
|
||||
'';
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/apikey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/device.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/headscale.proto

@@ -36,7 +36,7 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xa3, 0x13, 0x0a, 0x10, 0x48, 0x65,
|
||||
0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xb1, 0x16, 0x0a, 0x10, 0x48, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77,
|
||||
0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
|
||||
@@ -119,82 +119,106 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
|
||||
0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
|
||||
0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x74, 0x0a, 0x07, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73,
|
||||
0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
|
||||
0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x0f,
|
||||
0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12,
|
||||
0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f,
|
||||
0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x7e, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x2a, 0x1c, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0d, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x23, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x12, 0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73,
|
||||
0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12,
|
||||
0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x12, 0x8b, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x7e,
|
||||
0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e,
|
||||
0x2a, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x85,
|
||||
0x01, 0x0a, 0x0d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x25, 0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x22, 0x2e, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x97,
|
||||
0x01, 0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x29, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x25, 0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64,
|
||||
0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70,
|
||||
0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b,
|
||||
0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x4d, 0x6f,
|
||||
0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x22, 0x26, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x8b,
|
||||
0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x97, 0x01, 0x0a,
|
||||
0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x25, 0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b,
|
||||
0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x13, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x6b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69,
|
||||
0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70,
|
||||
0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x3a, 0x01, 0x2a, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12,
|
||||
0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x42,
|
||||
0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75,
|
||||
0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x3a, 0x01,
|
||||
0x2a, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x42, 0x29, 0x5a,
|
||||
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
|
||||
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
|
||||
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var file_headscale_v1_headscale_proto_goTypes = []interface{}{
@@ -208,34 +232,40 @@ var file_headscale_v1_headscale_proto_goTypes = []interface{}{
|
||||
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateMachineRequest)(nil), // 8: headscale.v1.DebugCreateMachineRequest
|
||||
(*GetMachineRequest)(nil), // 9: headscale.v1.GetMachineRequest
|
||||
(*RegisterMachineRequest)(nil), // 10: headscale.v1.RegisterMachineRequest
|
||||
(*DeleteMachineRequest)(nil), // 11: headscale.v1.DeleteMachineRequest
|
||||
(*ExpireMachineRequest)(nil), // 12: headscale.v1.ExpireMachineRequest
|
||||
(*ListMachinesRequest)(nil), // 13: headscale.v1.ListMachinesRequest
|
||||
(*GetMachineRouteRequest)(nil), // 14: headscale.v1.GetMachineRouteRequest
|
||||
(*EnableMachineRoutesRequest)(nil), // 15: headscale.v1.EnableMachineRoutesRequest
|
||||
(*CreateApiKeyRequest)(nil), // 16: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 17: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 18: headscale.v1.ListApiKeysRequest
|
||||
(*GetNamespaceResponse)(nil), // 19: headscale.v1.GetNamespaceResponse
|
||||
(*CreateNamespaceResponse)(nil), // 20: headscale.v1.CreateNamespaceResponse
|
||||
(*RenameNamespaceResponse)(nil), // 21: headscale.v1.RenameNamespaceResponse
|
||||
(*DeleteNamespaceResponse)(nil), // 22: headscale.v1.DeleteNamespaceResponse
|
||||
(*ListNamespacesResponse)(nil), // 23: headscale.v1.ListNamespacesResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 24: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 25: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 26: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateMachineResponse)(nil), // 27: headscale.v1.DebugCreateMachineResponse
|
||||
(*GetMachineResponse)(nil), // 28: headscale.v1.GetMachineResponse
|
||||
(*RegisterMachineResponse)(nil), // 29: headscale.v1.RegisterMachineResponse
|
||||
(*DeleteMachineResponse)(nil), // 30: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineResponse)(nil), // 31: headscale.v1.ExpireMachineResponse
|
||||
(*ListMachinesResponse)(nil), // 32: headscale.v1.ListMachinesResponse
|
||||
(*GetMachineRouteResponse)(nil), // 33: headscale.v1.GetMachineRouteResponse
|
||||
(*EnableMachineRoutesResponse)(nil), // 34: headscale.v1.EnableMachineRoutesResponse
|
||||
(*CreateApiKeyResponse)(nil), // 35: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 36: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 37: headscale.v1.ListApiKeysResponse
|
||||
(*SetTagsRequest)(nil), // 10: headscale.v1.SetTagsRequest
|
||||
(*RegisterMachineRequest)(nil), // 11: headscale.v1.RegisterMachineRequest
|
||||
(*DeleteMachineRequest)(nil), // 12: headscale.v1.DeleteMachineRequest
|
||||
(*ExpireMachineRequest)(nil), // 13: headscale.v1.ExpireMachineRequest
|
||||
(*RenameMachineRequest)(nil), // 14: headscale.v1.RenameMachineRequest
|
||||
(*ListMachinesRequest)(nil), // 15: headscale.v1.ListMachinesRequest
|
||||
(*MoveMachineRequest)(nil), // 16: headscale.v1.MoveMachineRequest
|
||||
(*GetMachineRouteRequest)(nil), // 17: headscale.v1.GetMachineRouteRequest
|
||||
(*EnableMachineRoutesRequest)(nil), // 18: headscale.v1.EnableMachineRoutesRequest
|
||||
(*CreateApiKeyRequest)(nil), // 19: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 20: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 21: headscale.v1.ListApiKeysRequest
|
||||
(*GetNamespaceResponse)(nil), // 22: headscale.v1.GetNamespaceResponse
|
||||
(*CreateNamespaceResponse)(nil), // 23: headscale.v1.CreateNamespaceResponse
|
||||
(*RenameNamespaceResponse)(nil), // 24: headscale.v1.RenameNamespaceResponse
|
||||
(*DeleteNamespaceResponse)(nil), // 25: headscale.v1.DeleteNamespaceResponse
|
||||
(*ListNamespacesResponse)(nil), // 26: headscale.v1.ListNamespacesResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 27: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 28: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 29: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateMachineResponse)(nil), // 30: headscale.v1.DebugCreateMachineResponse
|
||||
(*GetMachineResponse)(nil), // 31: headscale.v1.GetMachineResponse
|
||||
(*SetTagsResponse)(nil), // 32: headscale.v1.SetTagsResponse
|
||||
(*RegisterMachineResponse)(nil), // 33: headscale.v1.RegisterMachineResponse
|
||||
(*DeleteMachineResponse)(nil), // 34: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineResponse)(nil), // 35: headscale.v1.ExpireMachineResponse
|
||||
(*RenameMachineResponse)(nil), // 36: headscale.v1.RenameMachineResponse
|
||||
(*ListMachinesResponse)(nil), // 37: headscale.v1.ListMachinesResponse
|
||||
(*MoveMachineResponse)(nil), // 38: headscale.v1.MoveMachineResponse
|
||||
(*GetMachineRouteResponse)(nil), // 39: headscale.v1.GetMachineRouteResponse
|
||||
(*EnableMachineRoutesResponse)(nil), // 40: headscale.v1.EnableMachineRoutesResponse
|
||||
(*CreateApiKeyResponse)(nil), // 41: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 42: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 43: headscale.v1.ListApiKeysResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.HeadscaleService.GetNamespace:input_type -> headscale.v1.GetNamespaceRequest
|
||||
@@ -248,36 +278,42 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
7, // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
8, // 8: headscale.v1.HeadscaleService.DebugCreateMachine:input_type -> headscale.v1.DebugCreateMachineRequest
|
||||
9, // 9: headscale.v1.HeadscaleService.GetMachine:input_type -> headscale.v1.GetMachineRequest
|
||||
10, // 10: headscale.v1.HeadscaleService.RegisterMachine:input_type -> headscale.v1.RegisterMachineRequest
|
||||
11, // 11: headscale.v1.HeadscaleService.DeleteMachine:input_type -> headscale.v1.DeleteMachineRequest
|
||||
12, // 12: headscale.v1.HeadscaleService.ExpireMachine:input_type -> headscale.v1.ExpireMachineRequest
|
||||
13, // 13: headscale.v1.HeadscaleService.ListMachines:input_type -> headscale.v1.ListMachinesRequest
|
||||
14, // 14: headscale.v1.HeadscaleService.GetMachineRoute:input_type -> headscale.v1.GetMachineRouteRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.EnableMachineRoutes:input_type -> headscale.v1.EnableMachineRoutesRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.GetNamespace:output_type -> headscale.v1.GetNamespaceResponse
|
||||
20, // 20: headscale.v1.HeadscaleService.CreateNamespace:output_type -> headscale.v1.CreateNamespaceResponse
|
||||
21, // 21: headscale.v1.HeadscaleService.RenameNamespace:output_type -> headscale.v1.RenameNamespaceResponse
|
||||
22, // 22: headscale.v1.HeadscaleService.DeleteNamespace:output_type -> headscale.v1.DeleteNamespaceResponse
|
||||
23, // 23: headscale.v1.HeadscaleService.ListNamespaces:output_type -> headscale.v1.ListNamespacesResponse
|
||||
24, // 24: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.GetMachineRoute:output_type -> headscale.v1.GetMachineRouteResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.EnableMachineRoutes:output_type -> headscale.v1.EnableMachineRoutesResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
19, // [19:38] is the sub-list for method output_type
|
||||
0, // [0:19] is the sub-list for method input_type
|
||||
10, // 10: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
11, // 11: headscale.v1.HeadscaleService.RegisterMachine:input_type -> headscale.v1.RegisterMachineRequest
|
||||
12, // 12: headscale.v1.HeadscaleService.DeleteMachine:input_type -> headscale.v1.DeleteMachineRequest
|
||||
13, // 13: headscale.v1.HeadscaleService.ExpireMachine:input_type -> headscale.v1.ExpireMachineRequest
|
||||
14, // 14: headscale.v1.HeadscaleService.RenameMachine:input_type -> headscale.v1.RenameMachineRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.ListMachines:input_type -> headscale.v1.ListMachinesRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.MoveMachine:input_type -> headscale.v1.MoveMachineRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.GetMachineRoute:input_type -> headscale.v1.GetMachineRouteRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.EnableMachineRoutes:input_type -> headscale.v1.EnableMachineRoutesRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.GetNamespace:output_type -> headscale.v1.GetNamespaceResponse
|
||||
23, // 23: headscale.v1.HeadscaleService.CreateNamespace:output_type -> headscale.v1.CreateNamespaceResponse
|
||||
24, // 24: headscale.v1.HeadscaleService.RenameNamespace:output_type -> headscale.v1.RenameNamespaceResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.DeleteNamespace:output_type -> headscale.v1.DeleteNamespaceResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.ListNamespaces:output_type -> headscale.v1.ListNamespacesResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.RenameMachine:output_type -> headscale.v1.RenameMachineResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.MoveMachine:output_type -> headscale.v1.MoveMachineResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.GetMachineRoute:output_type -> headscale.v1.GetMachineRouteResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.EnableMachineRoutes:output_type -> headscale.v1.EnableMachineRoutesResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
22, // [22:44] is the sub-list for method output_type
|
||||
0, // [0:22] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
File diff suppressed because it is too large
@@ -35,10 +35,13 @@ type HeadscaleServiceClient interface {
// --- Machine start ---
DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error)
GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error)
SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)
RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error)
DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error)
ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error)
RenameMachine(ctx context.Context, in *RenameMachineRequest, opts ...grpc.CallOption) (*RenameMachineResponse, error)
ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error)
MoveMachine(ctx context.Context, in *MoveMachineRequest, opts ...grpc.CallOption) (*MoveMachineResponse, error)
// --- Route start ---
GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error)
EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error)
@@ -146,6 +149,15 @@ func (c *headscaleServiceClient) GetMachine(ctx context.Context, in *GetMachineR
return out, nil
}

func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {
out := new(SetTagsResponse)
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/SetTags", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
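// Illustrative sketch, not part of the generated file: calling the new SetTags RPC
// through the generated client. The dial target and the NewHeadscaleServiceClient
// constructor are assumptions based on standard protoc-gen-go-grpc output; only the
// SetTags method and its message types come from this diff.
//
//	conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithTransportCredentials(insecure.NewCredentials()))
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//
//	client := NewHeadscaleServiceClient(conn)
//	resp, err := client.SetTags(context.Background(), &SetTagsRequest{
//		MachineId: 1,
//		Tags:      []string{"tag:server"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(resp.GetMachine().GetForcedTags())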

func (c *headscaleServiceClient) RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error) {
out := new(RegisterMachineResponse)
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RegisterMachine", in, out, opts...)
@@ -173,6 +185,15 @@ func (c *headscaleServiceClient) ExpireMachine(ctx context.Context, in *ExpireMa
return out, nil
}

func (c *headscaleServiceClient) RenameMachine(ctx context.Context, in *RenameMachineRequest, opts ...grpc.CallOption) (*RenameMachineResponse, error) {
out := new(RenameMachineResponse)
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RenameMachine", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *headscaleServiceClient) ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error) {
out := new(ListMachinesResponse)
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListMachines", in, out, opts...)
@@ -182,6 +203,15 @@ func (c *headscaleServiceClient) ListMachines(ctx context.Context, in *ListMachi
return out, nil
}

func (c *headscaleServiceClient) MoveMachine(ctx context.Context, in *MoveMachineRequest, opts ...grpc.CallOption) (*MoveMachineResponse, error) {
out := new(MoveMachineResponse)
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/MoveMachine", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *headscaleServiceClient) GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error) {
out := new(GetMachineRouteResponse)
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachineRoute", in, out, opts...)
@@ -244,10 +274,13 @@ type HeadscaleServiceServer interface {
// --- Machine start ---
DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error)
GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error)
SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)
RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error)
DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error)
ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error)
RenameMachine(context.Context, *RenameMachineRequest) (*RenameMachineResponse, error)
ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error)
MoveMachine(context.Context, *MoveMachineRequest) (*MoveMachineResponse, error)
// --- Route start ---
GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error)
EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error)
@@ -292,6 +325,9 @@ func (UnimplementedHeadscaleServiceServer) DebugCreateMachine(context.Context, *
func (UnimplementedHeadscaleServiceServer) GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetMachine not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
}
func (UnimplementedHeadscaleServiceServer) RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RegisterMachine not implemented")
}
@@ -301,9 +337,15 @@ func (UnimplementedHeadscaleServiceServer) DeleteMachine(context.Context, *Delet
func (UnimplementedHeadscaleServiceServer) ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExpireMachine not implemented")
}
func (UnimplementedHeadscaleServiceServer) RenameMachine(context.Context, *RenameMachineRequest) (*RenameMachineResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RenameMachine not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListMachines not implemented")
}
func (UnimplementedHeadscaleServiceServer) MoveMachine(context.Context, *MoveMachineRequest) (*MoveMachineResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method MoveMachine not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetMachineRoute not implemented")
}
@@ -512,6 +554,24 @@ func _HeadscaleService_GetMachine_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetTagsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).SetTags(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/headscale.v1.HeadscaleService/SetTags",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).SetTags(ctx, req.(*SetTagsRequest))
}
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_RegisterMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RegisterMachineRequest)
if err := dec(in); err != nil {
@@ -566,6 +626,24 @@ func _HeadscaleService_ExpireMachine_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_RenameMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RenameMachineRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).RenameMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/headscale.v1.HeadscaleService/RenameMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).RenameMachine(ctx, req.(*RenameMachineRequest))
}
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_ListMachines_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListMachinesRequest)
if err := dec(in); err != nil {
@@ -584,6 +662,24 @@ func _HeadscaleService_ListMachines_Handler(srv interface{}, ctx context.Context
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_MoveMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MoveMachineRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).MoveMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/headscale.v1.HeadscaleService/MoveMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).MoveMachine(ctx, req.(*MoveMachineRequest))
}
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_GetMachineRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetMachineRouteRequest)
if err := dec(in); err != nil {
@@ -721,6 +817,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetMachine",
Handler: _HeadscaleService_GetMachine_Handler,
},
{
MethodName: "SetTags",
Handler: _HeadscaleService_SetTags_Handler,
},
{
MethodName: "RegisterMachine",
Handler: _HeadscaleService_RegisterMachine_Handler,
@@ -733,10 +833,18 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "ExpireMachine",
Handler: _HeadscaleService_ExpireMachine_Handler,
},
{
MethodName: "RenameMachine",
Handler: _HeadscaleService_RenameMachine_Handler,
},
{
MethodName: "ListMachines",
Handler: _HeadscaleService_ListMachines_Handler,
},
{
MethodName: "MoveMachine",
Handler: _HeadscaleService_MoveMachine_Handler,
},
{
MethodName: "GetMachineRoute",
Handler: _HeadscaleService_GetMachineRoute_Handler,

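// Illustrative sketch, not part of the generated file: a server implementation can
// embed UnimplementedHeadscaleServiceServer so the newly added RPCs (SetTags,
// RenameMachine, MoveMachine) compile immediately and keep returning
// codes.Unimplemented until they are overridden. RegisterHeadscaleServiceServer is
// assumed from the standard protoc-gen-go-grpc output.
//
//	type machineServer struct {
//		UnimplementedHeadscaleServiceServer
//	}
//
//	func (s *machineServer) SetTags(ctx context.Context, req *SetTagsRequest) (*SetTagsResponse, error) {
//		// look up req.GetMachineId(), persist req.GetTags(), then return the updated machine
//		return &SetTagsResponse{}, nil
//	}
//
//	grpcServer := grpc.NewServer()
//	RegisterHeadscaleServiceServer(grpcServer, &machineServer{})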
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/machine.proto

@@ -91,6 +91,10 @@ type Machine struct {
PreAuthKey *PreAuthKey `protobuf:"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
RegisterMethod RegisterMethod `protobuf:"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod" json:"register_method,omitempty"`
ForcedTags []string `protobuf:"bytes,18,rep,name=forced_tags,json=forcedTags,proto3" json:"forced_tags,omitempty"`
InvalidTags []string `protobuf:"bytes,19,rep,name=invalid_tags,json=invalidTags,proto3" json:"invalid_tags,omitempty"`
ValidTags []string `protobuf:"bytes,20,rep,name=valid_tags,json=validTags,proto3" json:"valid_tags,omitempty"`
GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
}

func (x *Machine) Reset() {
@@ -216,6 +220,34 @@ func (x *Machine) GetRegisterMethod() RegisterMethod {
return RegisterMethod_REGISTER_METHOD_UNSPECIFIED
}

func (x *Machine) GetForcedTags() []string {
if x != nil {
return x.ForcedTags
}
return nil
}

func (x *Machine) GetInvalidTags() []string {
if x != nil {
return x.InvalidTags
}
return nil
}

func (x *Machine) GetValidTags() []string {
if x != nil {
return x.ValidTags
}
return nil
}

func (x *Machine) GetGivenName() string {
if x != nil {
return x.GivenName
}
return ""
}

type RegisterMachineRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -412,6 +444,108 @@ func (x *GetMachineResponse) GetMachine() *Machine {
|
||||
return nil
|
||||
}
|
||||
|
||||
type SetTagsRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
Tags []string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SetTagsRequest) Reset() {
|
||||
*x = SetTagsRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SetTagsRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SetTagsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SetTagsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SetTagsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SetTagsRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *SetTagsRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *SetTagsRequest) GetTags() []string {
|
||||
if x != nil {
|
||||
return x.Tags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SetTagsResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Machine *Machine `protobuf:"bytes,1,opt,name=machine,proto3" json:"machine,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SetTagsResponse) Reset() {
|
||||
*x = SetTagsResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SetTagsResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SetTagsResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SetTagsResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SetTagsResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SetTagsResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *SetTagsResponse) GetMachine() *Machine {
|
||||
if x != nil {
|
||||
return x.Machine
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
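// Illustrative sketch, not part of the generated file: the generated getters are
// nil-safe, so callers can chain them without guarding every pointer.
//
//	req := &SetTagsRequest{MachineId: 1, Tags: []string{"tag:server", "tag:prod"}}
//	fmt.Println(req.GetMachineId(), req.GetTags())
//
//	var resp *SetTagsResponse             // nil receiver
//	fmt.Println(resp.GetMachine() == nil) // prints true instead of panicking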
type DeleteMachineRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -423,7 +557,7 @@ type DeleteMachineRequest struct {
|
||||
func (x *DeleteMachineRequest) Reset() {
|
||||
*x = DeleteMachineRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[5]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -436,7 +570,7 @@ func (x *DeleteMachineRequest) String() string {
|
||||
func (*DeleteMachineRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[5]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -449,7 +583,7 @@ func (x *DeleteMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DeleteMachineRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteMachineRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{5}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *DeleteMachineRequest) GetMachineId() uint64 {
|
||||
@@ -468,7 +602,7 @@ type DeleteMachineResponse struct {
|
||||
func (x *DeleteMachineResponse) Reset() {
|
||||
*x = DeleteMachineResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[6]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -481,7 +615,7 @@ func (x *DeleteMachineResponse) String() string {
|
||||
func (*DeleteMachineResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[6]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -494,7 +628,7 @@ func (x *DeleteMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DeleteMachineResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteMachineResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{6}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
type ExpireMachineRequest struct {
|
||||
@@ -508,7 +642,7 @@ type ExpireMachineRequest struct {
|
||||
func (x *ExpireMachineRequest) Reset() {
|
||||
*x = ExpireMachineRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[7]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -521,7 +655,7 @@ func (x *ExpireMachineRequest) String() string {
|
||||
func (*ExpireMachineRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExpireMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[7]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -534,7 +668,7 @@ func (x *ExpireMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ExpireMachineRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExpireMachineRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{7}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *ExpireMachineRequest) GetMachineId() uint64 {
|
||||
@@ -555,7 +689,7 @@ type ExpireMachineResponse struct {
|
||||
func (x *ExpireMachineResponse) Reset() {
|
||||
*x = ExpireMachineResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[8]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -568,7 +702,7 @@ func (x *ExpireMachineResponse) String() string {
|
||||
func (*ExpireMachineResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ExpireMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[8]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[10]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -581,7 +715,7 @@ func (x *ExpireMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ExpireMachineResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ExpireMachineResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{8}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *ExpireMachineResponse) GetMachine() *Machine {
|
||||
@@ -591,6 +725,108 @@ func (x *ExpireMachineResponse) GetMachine() *Machine {
|
||||
return nil
|
||||
}
|
||||
|
||||
type RenameMachineRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameMachineRequest) Reset() {
|
||||
*x = RenameMachineRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RenameMachineRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RenameMachineRequest) ProtoMessage() {}
|
||||
|
||||
func (x *RenameMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[11]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RenameMachineRequest.ProtoReflect.Descriptor instead.
|
||||
func (*RenameMachineRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{11}
|
||||
}
|
||||
|
||||
func (x *RenameMachineRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *RenameMachineRequest) GetNewName() string {
|
||||
if x != nil {
|
||||
return x.NewName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RenameMachineResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Machine *Machine `protobuf:"bytes,1,opt,name=machine,proto3" json:"machine,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameMachineResponse) Reset() {
|
||||
*x = RenameMachineResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RenameMachineResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RenameMachineResponse) ProtoMessage() {}
|
||||
|
||||
func (x *RenameMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[12]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RenameMachineResponse.ProtoReflect.Descriptor instead.
|
||||
func (*RenameMachineResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{12}
|
||||
}
|
||||
|
||||
func (x *RenameMachineResponse) GetMachine() *Machine {
|
||||
if x != nil {
|
||||
return x.Machine
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ListMachinesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -602,7 +838,7 @@ type ListMachinesRequest struct {
|
||||
func (x *ListMachinesRequest) Reset() {
|
||||
*x = ListMachinesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[9]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[13]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -615,7 +851,7 @@ func (x *ListMachinesRequest) String() string {
|
||||
func (*ListMachinesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListMachinesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[9]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[13]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -628,7 +864,7 @@ func (x *ListMachinesRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ListMachinesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListMachinesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{9}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{13}
|
||||
}
|
||||
|
||||
func (x *ListMachinesRequest) GetNamespace() string {
|
||||
@@ -649,7 +885,7 @@ type ListMachinesResponse struct {
|
||||
func (x *ListMachinesResponse) Reset() {
|
||||
*x = ListMachinesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[10]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -662,7 +898,7 @@ func (x *ListMachinesResponse) String() string {
|
||||
func (*ListMachinesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListMachinesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[10]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[14]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -675,7 +911,7 @@ func (x *ListMachinesResponse) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ListMachinesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListMachinesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{10}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *ListMachinesResponse) GetMachines() []*Machine {
|
||||
@@ -685,6 +921,108 @@ func (x *ListMachinesResponse) GetMachines() []*Machine {
|
||||
return nil
|
||||
}
|
||||
|
||||
type MoveMachineRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MoveMachineRequest) Reset() {
|
||||
*x = MoveMachineRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MoveMachineRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MoveMachineRequest) ProtoMessage() {}
|
||||
|
||||
func (x *MoveMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[15]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MoveMachineRequest.ProtoReflect.Descriptor instead.
|
||||
func (*MoveMachineRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{15}
|
||||
}
|
||||
|
||||
func (x *MoveMachineRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *MoveMachineRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type MoveMachineResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Machine *Machine `protobuf:"bytes,1,opt,name=machine,proto3" json:"machine,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MoveMachineResponse) Reset() {
|
||||
*x = MoveMachineResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[16]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MoveMachineResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MoveMachineResponse) ProtoMessage() {}
|
||||
|
||||
func (x *MoveMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[16]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MoveMachineResponse.ProtoReflect.Descriptor instead.
|
||||
func (*MoveMachineResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{16}
|
||||
}
|
||||
|
||||
func (x *MoveMachineResponse) GetMachine() *Machine {
|
||||
if x != nil {
|
||||
return x.Machine
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
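// Illustrative sketch, not part of the generated file: MoveMachineRequest pairs a
// machine ID with the destination namespace. The client variable is assumed to be a
// HeadscaleServiceClient obtained elsewhere.
//
//	moved, err := client.MoveMachine(ctx, &MoveMachineRequest{
//		MachineId: 1,
//		Namespace: "engineering",
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println(moved.GetMachine().GetGivenName())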
type DebugCreateMachineRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -699,7 +1037,7 @@ type DebugCreateMachineRequest struct {
|
||||
func (x *DebugCreateMachineRequest) Reset() {
|
||||
*x = DebugCreateMachineRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[11]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[17]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -712,7 +1050,7 @@ func (x *DebugCreateMachineRequest) String() string {
|
||||
func (*DebugCreateMachineRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DebugCreateMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[11]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[17]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -725,7 +1063,7 @@ func (x *DebugCreateMachineRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DebugCreateMachineRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DebugCreateMachineRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{11}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{17}
|
||||
}
|
||||
|
||||
func (x *DebugCreateMachineRequest) GetNamespace() string {
|
||||
@@ -767,7 +1105,7 @@ type DebugCreateMachineResponse struct {
|
||||
func (x *DebugCreateMachineResponse) Reset() {
|
||||
*x = DebugCreateMachineResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[12]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[18]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -780,7 +1118,7 @@ func (x *DebugCreateMachineResponse) String() string {
|
||||
func (*DebugCreateMachineResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DebugCreateMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[12]
|
||||
mi := &file_headscale_v1_machine_proto_msgTypes[18]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -793,7 +1131,7 @@ func (x *DebugCreateMachineResponse) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DebugCreateMachineResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DebugCreateMachineResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{12}
|
||||
return file_headscale_v1_machine_proto_rawDescGZIP(), []int{18}
|
||||
}
|
||||
|
||||
func (x *DebugCreateMachineResponse) GetMachine() *Machine {
|
||||
@@ -814,7 +1152,7 @@ var file_headscale_v1_machine_proto_rawDesc = []byte{
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b,
|
||||
0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdd, 0x04, 0x0a, 0x07, 0x4d, 0x61, 0x63,
|
||||
0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x05, 0x0a, 0x07, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f,
|
||||
0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
@@ -852,69 +1190,106 @@ var file_headscale_v1_machine_proto_rawDesc = []byte{
|
||||
0x74, 0x68, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
|
||||
0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
|
||||
0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69,
|
||||
0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
|
||||
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
|
||||
0x65, 0x79, 0x22, 0x4a, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a,
|
||||
0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x32,
|
||||
0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x63,
|
||||
0x65, 0x64, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x66,
|
||||
0x6f, 0x72, 0x63, 0x65, 0x64, 0x54, 0x61, 0x67, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x76,
|
||||
0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x0b, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
|
||||
0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x67,
|
||||
0x69, 0x76, 0x65, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x09, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x12,
|
||||
0x22, 0x48, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4a, 0x0a, 0x17, 0x52, 0x65,
|
||||
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x32, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x12, 0x47, 0x65,
|
||||
0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x22, 0x43, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x49, 0x64, 0x22, 0x45, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64,
|
||||
0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64,
|
||||
0x22, 0x48, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63,
|
||||
0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0x42, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x33, 0x0a, 0x13, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
|
||||
0x49, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x08, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x22, 0x77, 0x0a, 0x19, 0x44, 0x65,
|
||||
0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x22, 0x4d, 0x0a, 0x1a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49,
|
||||
0x64, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49,
|
||||
0x64, 0x22, 0x48, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x50, 0x0a, 0x14, 0x52,
|
||||
0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x48, 0x0a,
|
||||
0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x33, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x49, 0x0a, 0x14,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x08, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x12, 0x4d, 0x6f, 0x76, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x4d, 0x6f,
|
||||
0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d,
|
||||
0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45,
|
||||
0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
|
||||
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54,
|
||||
0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b,
|
||||
0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52,
|
||||
0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x10, 0x02, 0x12, 0x18, 0x0a,
|
||||
0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44,
|
||||
0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f,
|
||||
0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6e, 0x65, 0x22, 0x77, 0x0a, 0x19, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a,
|
||||
0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20,
|
||||
0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x4d, 0x0a, 0x1a, 0x44,
|
||||
0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a,
|
||||
0x1b, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44,
|
||||
0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c,
|
||||
0x0a, 0x18, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
|
||||
0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13,
|
||||
0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f,
|
||||
0x43, 0x4c, 0x49, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45,
|
||||
0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42,
|
||||
0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75,
|
||||
0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -930,7 +1305,7 @@ func file_headscale_v1_machine_proto_rawDescGZIP() []byte {
|
||||
}
|
||||
|
||||
var file_headscale_v1_machine_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_headscale_v1_machine_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
|
||||
var file_headscale_v1_machine_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
|
||||
var file_headscale_v1_machine_proto_goTypes = []interface{}{
|
||||
(RegisterMethod)(0), // 0: headscale.v1.RegisterMethod
|
||||
(*Machine)(nil), // 1: headscale.v1.Machine
|
||||
@@ -938,36 +1313,45 @@ var file_headscale_v1_machine_proto_goTypes = []interface{}{
|
||||
(*RegisterMachineResponse)(nil), // 3: headscale.v1.RegisterMachineResponse
|
||||
(*GetMachineRequest)(nil), // 4: headscale.v1.GetMachineRequest
|
||||
(*GetMachineResponse)(nil), // 5: headscale.v1.GetMachineResponse
|
||||
(*DeleteMachineRequest)(nil), // 6: headscale.v1.DeleteMachineRequest
|
||||
(*DeleteMachineResponse)(nil), // 7: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineRequest)(nil), // 8: headscale.v1.ExpireMachineRequest
|
||||
(*ExpireMachineResponse)(nil), // 9: headscale.v1.ExpireMachineResponse
|
||||
(*ListMachinesRequest)(nil), // 10: headscale.v1.ListMachinesRequest
|
||||
(*ListMachinesResponse)(nil), // 11: headscale.v1.ListMachinesResponse
|
||||
(*DebugCreateMachineRequest)(nil), // 12: headscale.v1.DebugCreateMachineRequest
|
||||
(*DebugCreateMachineResponse)(nil), // 13: headscale.v1.DebugCreateMachineResponse
|
||||
(*Namespace)(nil), // 14: headscale.v1.Namespace
|
||||
(*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp
|
||||
(*PreAuthKey)(nil), // 16: headscale.v1.PreAuthKey
|
||||
(*SetTagsRequest)(nil), // 6: headscale.v1.SetTagsRequest
|
||||
(*SetTagsResponse)(nil), // 7: headscale.v1.SetTagsResponse
|
||||
(*DeleteMachineRequest)(nil), // 8: headscale.v1.DeleteMachineRequest
|
||||
(*DeleteMachineResponse)(nil), // 9: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineRequest)(nil), // 10: headscale.v1.ExpireMachineRequest
|
||||
(*ExpireMachineResponse)(nil), // 11: headscale.v1.ExpireMachineResponse
|
||||
(*RenameMachineRequest)(nil), // 12: headscale.v1.RenameMachineRequest
|
||||
(*RenameMachineResponse)(nil), // 13: headscale.v1.RenameMachineResponse
|
||||
(*ListMachinesRequest)(nil), // 14: headscale.v1.ListMachinesRequest
|
||||
(*ListMachinesResponse)(nil), // 15: headscale.v1.ListMachinesResponse
|
||||
(*MoveMachineRequest)(nil), // 16: headscale.v1.MoveMachineRequest
|
||||
(*MoveMachineResponse)(nil), // 17: headscale.v1.MoveMachineResponse
|
||||
(*DebugCreateMachineRequest)(nil), // 18: headscale.v1.DebugCreateMachineRequest
|
||||
(*DebugCreateMachineResponse)(nil), // 19: headscale.v1.DebugCreateMachineResponse
|
||||
(*Namespace)(nil), // 20: headscale.v1.Namespace
|
||||
(*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
|
||||
(*PreAuthKey)(nil), // 22: headscale.v1.PreAuthKey
|
||||
}
|
||||
var file_headscale_v1_machine_proto_depIdxs = []int32{
|
||||
14, // 0: headscale.v1.Machine.namespace:type_name -> headscale.v1.Namespace
|
||||
15, // 1: headscale.v1.Machine.last_seen:type_name -> google.protobuf.Timestamp
|
||||
15, // 2: headscale.v1.Machine.last_successful_update:type_name -> google.protobuf.Timestamp
|
||||
15, // 3: headscale.v1.Machine.expiry:type_name -> google.protobuf.Timestamp
|
||||
16, // 4: headscale.v1.Machine.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
15, // 5: headscale.v1.Machine.created_at:type_name -> google.protobuf.Timestamp
|
||||
20, // 0: headscale.v1.Machine.namespace:type_name -> headscale.v1.Namespace
|
||||
21, // 1: headscale.v1.Machine.last_seen:type_name -> google.protobuf.Timestamp
|
||||
21, // 2: headscale.v1.Machine.last_successful_update:type_name -> google.protobuf.Timestamp
|
||||
21, // 3: headscale.v1.Machine.expiry:type_name -> google.protobuf.Timestamp
|
||||
22, // 4: headscale.v1.Machine.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
21, // 5: headscale.v1.Machine.created_at:type_name -> google.protobuf.Timestamp
|
||||
0, // 6: headscale.v1.Machine.register_method:type_name -> headscale.v1.RegisterMethod
|
||||
1, // 7: headscale.v1.RegisterMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 8: headscale.v1.GetMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 9: headscale.v1.ExpireMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 10: headscale.v1.ListMachinesResponse.machines:type_name -> headscale.v1.Machine
|
||||
1, // 11: headscale.v1.DebugCreateMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
12, // [12:12] is the sub-list for method output_type
|
||||
12, // [12:12] is the sub-list for method input_type
|
||||
12, // [12:12] is the sub-list for extension type_name
|
||||
12, // [12:12] is the sub-list for extension extendee
|
||||
0, // [0:12] is the sub-list for field type_name
|
||||
1, // 9: headscale.v1.SetTagsResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 10: headscale.v1.ExpireMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 11: headscale.v1.RenameMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 12: headscale.v1.ListMachinesResponse.machines:type_name -> headscale.v1.Machine
|
||||
1, // 13: headscale.v1.MoveMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
1, // 14: headscale.v1.DebugCreateMachineResponse.machine:type_name -> headscale.v1.Machine
|
||||
15, // [15:15] is the sub-list for method output_type
|
||||
15, // [15:15] is the sub-list for method input_type
|
||||
15, // [15:15] is the sub-list for extension type_name
|
||||
15, // [15:15] is the sub-list for extension extendee
|
||||
0, // [0:15] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_machine_proto_init() }
|
||||
@@ -1039,7 +1423,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteMachineRequest); i {
|
||||
switch v := v.(*SetTagsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1051,7 +1435,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteMachineResponse); i {
|
||||
switch v := v.(*SetTagsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1063,7 +1447,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExpireMachineRequest); i {
|
||||
switch v := v.(*DeleteMachineRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1075,7 +1459,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExpireMachineResponse); i {
|
||||
switch v := v.(*DeleteMachineResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1087,7 +1471,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListMachinesRequest); i {
|
||||
switch v := v.(*ExpireMachineRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1099,7 +1483,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListMachinesResponse); i {
|
||||
switch v := v.(*ExpireMachineResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1111,7 +1495,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DebugCreateMachineRequest); i {
|
||||
switch v := v.(*RenameMachineRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1123,6 +1507,78 @@ func file_headscale_v1_machine_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RenameMachineResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListMachinesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListMachinesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MoveMachineRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MoveMachineResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DebugCreateMachineRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_machine_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DebugCreateMachineResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -1141,7 +1597,7 @@ func file_headscale_v1_machine_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_machine_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 13,
|
||||
NumMessages: 19,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/namespace.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/preauthkey.proto
|
||||
|
||||
@@ -34,6 +34,7 @@ type PreAuthKey struct {
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) Reset() {
|
||||
@@ -124,6 +125,13 @@ func (x *PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetAclTags() []string {
|
||||
if x != nil {
|
||||
return x.AclTags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -133,6 +141,7 @@ type CreatePreAuthKeyRequest struct {
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) Reset() {
|
||||
@@ -195,6 +204,13 @@ func (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
|
||||
if x != nil {
|
||||
return x.AclTags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -436,7 +452,7 @@ var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac,
|
||||
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
|
||||
@@ -454,42 +470,45 @@ var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x41, 0x74, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65,
|
||||
0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68,
|
||||
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a,
|
||||
0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a,
|
||||
0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, 0x17, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
|
||||
0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x09,
|
||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x67, 0x73, 0x22, 0xc8, 0x01,
|
||||
0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
|
||||
0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61,
|
||||
0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a,
|
||||
0x08, 0x61, 0x63, 0x6c, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x07, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x67, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x22, 0x49, 0x0a, 0x17, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75,
|
||||
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
|
||||
0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72,
|
||||
0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f,
|
||||
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/routes.proto
|
||||
|
||||
|
||||
@@ -291,6 +291,80 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/namespace": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_MoveMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1MoveMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "namespace",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/rename/{newName}": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_RenameMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1RenameMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "newName",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/routes": {
|
||||
"get": {
|
||||
"summary": "--- Route start ---",
|
||||
@@ -362,6 +436,53 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/tags": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_SetTags",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1SetTagsResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"tags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/namespace": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_ListNamespaces",
|
||||
@@ -703,6 +824,12 @@
|
||||
"expiration": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"aclTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -906,6 +1033,35 @@
|
||||
},
|
||||
"registerMethod": {
|
||||
"$ref": "#/definitions/v1RegisterMethod"
|
||||
},
|
||||
"forcedTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"invalidTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"validTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"givenName": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1MoveMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -952,6 +1108,12 @@
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"aclTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -973,6 +1135,14 @@
|
||||
],
|
||||
"default": "REGISTER_METHOD_UNSPECIFIED"
|
||||
},
|
||||
"v1RenameMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1RenameNamespaceResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -997,6 +1167,14 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1SetTagsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
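The swagger changes above expose the new machine RPCs over the HTTP gateway: POST /api/v1/machine/{machineId}/namespace (MoveMachine), POST /api/v1/machine/{machineId}/rename/{newName} (RenameMachine) and POST /api/v1/machine/{machineId}/tags (SetTags), plus the aclTags, validTags/invalidTags and givenName fields in the definitions. A minimal Go sketch of calling the SetTags endpoint is shown below; the server address, the machine ID and the Bearer API-key header are assumptions about a local deployment, not something this diff specifies.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Body matches the inline schema above: {"tags": ["..."]}.
	body, err := json.Marshal(map[string][]string{"tags": {"tag:server"}})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(
		http.MethodPost,
		"http://localhost:8080/api/v1/machine/1/tags", // assumed headscale address and machine ID
		bytes.NewReader(body),
	)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer YOUR_API_KEY") // assumed auth scheme for the gateway

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // a 200 response carries a v1SetTagsResponse with the updated machine
}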
198
go.mod
@@ -1,148 +1,150 @@
|
||||
module github.com/juanfont/headscale
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.4
|
||||
github.com/AlecAivazis/survey/v2 v2.3.6
|
||||
github.com/ccding/go-stun/stun v0.0.0-20200514191101-4dc67bcdb029
|
||||
github.com/coreos/go-oidc/v3 v3.1.0
|
||||
github.com/cenkalti/backoff/v4 v4.1.3
|
||||
github.com/coreos/go-oidc/v3 v3.4.0
|
||||
github.com/deckarep/golang-set/v2 v2.1.0
|
||||
github.com/efekarakus/termcolor v1.0.1
|
||||
github.com/fatih/set v0.2.1
|
||||
github.com/gin-gonic/gin v1.7.7
|
||||
github.com/glebarez/sqlite v1.4.3
|
||||
github.com/gofrs/uuid v4.2.0+incompatible
|
||||
github.com/glebarez/sqlite v1.5.0
|
||||
github.com/gofrs/uuid v4.3.0+incompatible
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0
|
||||
github.com/klauspost/compress v1.15.1
|
||||
github.com/ory/dockertest/v3 v3.8.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0
|
||||
github.com/klauspost/compress v1.15.12
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
|
||||
github.com/ory/dockertest/v3 v3.9.1
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/prometheus/client_golang v1.12.1
|
||||
github.com/pterm/pterm v0.12.41
|
||||
github.com/rs/zerolog v1.26.1
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/spf13/viper v1.11.0
|
||||
github.com/stretchr/testify v1.7.1
|
||||
github.com/tailscale/hujson v0.0.0-20220421170326-6583d0610064
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/prometheus/common v0.37.0
|
||||
github.com/pterm/pterm v0.12.49
|
||||
github.com/puzpuzpuz/xsync/v2 v2.0.2
|
||||
github.com/rs/zerolog v1.28.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/viper v1.13.0
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/tailscale/hujson v0.0.0-20220630195928-54599719472f
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
github.com/zsais/go-gin-prometheus v0.1.0
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
google.golang.org/genproto v0.0.0-20220422154200-b37d22cd5731
|
||||
google.golang.org/grpc v1.46.0
|
||||
google.golang.org/protobuf v1.28.0
|
||||
go4.org/netipx v0.0.0-20220925034521-797b0c90d8ab
|
||||
golang.org/x/crypto v0.1.0
|
||||
golang.org/x/net v0.1.0
|
||||
golang.org/x/oauth2 v0.1.0
|
||||
golang.org/x/sync v0.1.0
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c
|
||||
google.golang.org/grpc v1.50.1
|
||||
google.golang.org/protobuf v1.28.1
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
|
||||
gorm.io/driver/postgres v1.3.5
|
||||
gorm.io/gorm v1.23.4
|
||||
inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6
|
||||
tailscale.com v1.24.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.4.5
|
||||
gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755
|
||||
tailscale.com v1.32.2
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||
atomicgo.dev/cursor v0.1.1 // indirect
|
||||
atomicgo.dev/keyboard v0.2.8 // indirect
|
||||
filippo.io/edwards25519 v1.0.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect
|
||||
github.com/atomicgo/cursor v0.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect
|
||||
github.com/containerd/console v1.0.3 // indirect
|
||||
github.com/containerd/continuity v0.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v20.10.11+incompatible // indirect
|
||||
github.com/docker/docker v20.10.7+incompatible // indirect
|
||||
github.com/docker/cli v20.10.21+incompatible // indirect
|
||||
github.com/docker/docker v20.10.21+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.16.0 // indirect
|
||||
github.com/go-playground/locales v0.13.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.17.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.4.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.19.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gookit/color v1.5.0 // indirect
|
||||
github.com/hashicorp/go-version v1.4.0 // indirect
|
||||
github.com/gookit/color v1.5.2 // indirect
|
||||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.12.0 // indirect
|
||||
github.com/jackc/pgconn v1.13.0 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.1 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||
github.com/jackc/pgtype v1.11.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.16.0 // indirect
|
||||
github.com/jackc/pgtype v1.12.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.17.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.4 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/josharian/native v1.0.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.2.3 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.0 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.5 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mdlayher/netlink v1.6.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mdlayher/netlink v1.6.2 // indirect
|
||||
github.com/mdlayher/socket v0.2.3 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
|
||||
github.com/rivo/uniseg v0.4.2 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1-0.20211023094830-115ce09fd6b4 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/spf13/afero v1.9.2 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/subosito/gotenv v1.2.0 // indirect
|
||||
github.com/ugorji/go/codec v1.1.7 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/subosito/gotenv v1.4.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
|
||||
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
|
||||
go4.org/mem v0.0.0-20210711025021-927187094b94 // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 // indirect
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.4.10 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
|
||||
golang.org/x/mod v0.6.0 // indirect
|
||||
golang.org/x/sys v0.1.0 // indirect
|
||||
golang.org/x/term v0.1.0 // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
golang.org/x/tools v0.2.0 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
|
||||
modernc.org/libc v1.14.12 // indirect
|
||||
modernc.org/mathutil v1.4.1 // indirect
|
||||
modernc.org/memory v1.0.7 // indirect
|
||||
modernc.org/sqlite v1.16.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
modernc.org/libc v1.21.4 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/memory v1.4.0 // indirect
|
||||
modernc.org/sqlite v1.19.3 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
)
136
grpcv1.go
@@ -1,13 +1,18 @@
|
||||
//nolint
|
||||
// nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
type headscaleV1APIServer struct { // v1.HeadscaleServiceServer
|
||||
@@ -102,11 +107,21 @@ func (api headscaleV1APIServer) CreatePreAuthKey(
|
||||
expiration = request.GetExpiration().AsTime()
|
||||
}
|
||||
|
||||
for _, tag := range request.AclTags {
|
||||
err := validateTag(tag)
|
||||
if err != nil {
|
||||
return &v1.CreatePreAuthKeyResponse{
|
||||
PreAuthKey: nil,
|
||||
}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
}
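// Editor note, not part of the diff: each requested ACL tag is passed through
// validateTag (defined further down in this file) before the key is created,
// so a single malformed tag rejects the whole CreatePreAuthKey call with
// codes.InvalidArgument.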
|
||||
|
||||
preAuthKey, err := api.h.CreatePreAuthKey(
|
||||
request.GetNamespace(),
|
||||
request.GetReusable(),
|
||||
request.GetEphemeral(),
|
||||
&expiration,
|
||||
request.AclTags,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -155,7 +170,7 @@ func (api headscaleV1APIServer) RegisterMachine(
|
||||
) (*v1.RegisterMachineResponse, error) {
|
||||
log.Trace().
|
||||
Str("namespace", request.GetNamespace()).
|
||||
Str("machine_key", request.GetKey()).
|
||||
Str("node_key", request.GetKey()).
|
||||
Msg("Registering machine")
|
||||
|
||||
machine, err := api.h.RegisterMachineFromAuthCallback(
|
||||
@@ -182,6 +197,52 @@ func (api headscaleV1APIServer) GetMachine(
|
||||
return &v1.GetMachineResponse{Machine: machine.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) SetTags(
|
||||
ctx context.Context,
|
||||
request *v1.SetTagsRequest,
|
||||
) (*v1.SetTagsResponse, error) {
|
||||
machine, err := api.h.GetMachineByID(request.GetMachineId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, tag := range request.GetTags() {
|
||||
err := validateTag(tag)
|
||||
if err != nil {
|
||||
return &v1.SetTagsResponse{
|
||||
Machine: nil,
|
||||
}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
err = api.h.SetTags(machine, request.GetTags())
|
||||
if err != nil {
|
||||
return &v1.SetTagsResponse{
|
||||
Machine: nil,
|
||||
}, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("machine", machine.Hostname).
|
||||
Strs("tags", request.GetTags()).
|
||||
Msg("Changing tags of machine")
|
||||
|
||||
return &v1.SetTagsResponse{Machine: machine.toProto()}, nil
|
||||
}
|
||||
|
||||
func validateTag(tag string) error {
|
||||
if strings.Index(tag, "tag:") != 0 {
|
||||
return fmt.Errorf("tag must start with the string 'tag:'")
|
||||
}
|
||||
if strings.ToLower(tag) != tag {
|
||||
return fmt.Errorf("tag should be lowercase")
|
||||
}
|
||||
if len(strings.Fields(tag)) > 1 {
|
||||
return fmt.Errorf("tag should not contains space")
|
||||
}
|
||||
return nil
|
||||
}
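// Editor note, not part of the diff: validateTag accepts only lowercase,
// space-free strings carrying the "tag:" prefix, for example:
//   validateTag("tag:prod")      // nil
//   validateTag("prod")          // error, missing "tag:" prefix
//   validateTag("tag:PROD")      // error, not lowercase
//   validateTag("tag:two words") // error, contains a space
// Test_validateTag in the new grpcv1_test.go below exercises the same cases.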
|
||||
|
||||
func (api headscaleV1APIServer) DeleteMachine(
|
||||
ctx context.Context,
|
||||
request *v1.DeleteMachineRequest,
|
||||
@@ -215,13 +276,38 @@ func (api headscaleV1APIServer) ExpireMachine(
|
||||
)
|
||||
|
||||
log.Trace().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Time("expiry", *machine.Expiry).
|
||||
Msg("machine expired")
|
||||
|
||||
return &v1.ExpireMachineResponse{Machine: machine.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) RenameMachine(
|
||||
ctx context.Context,
|
||||
request *v1.RenameMachineRequest,
|
||||
) (*v1.RenameMachineResponse, error) {
|
||||
machine, err := api.h.GetMachineByID(request.GetMachineId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = api.h.RenameMachine(
|
||||
machine,
|
||||
request.GetNewName(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("machine", machine.Hostname).
|
||||
Str("new_name", request.GetNewName()).
|
||||
Msg("machine renamed")
|
||||
|
||||
return &v1.RenameMachineResponse{Machine: machine.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) ListMachines(
|
||||
ctx context.Context,
|
||||
request *v1.ListMachinesRequest,
|
||||
@@ -247,12 +333,37 @@ func (api headscaleV1APIServer) ListMachines(
|
||||
|
||||
response := make([]*v1.Machine, len(machines))
|
||||
for index, machine := range machines {
|
||||
response[index] = machine.toProto()
|
||||
m := machine.toProto()
|
||||
validTags, invalidTags := getTags(
|
||||
api.h.aclPolicy,
|
||||
machine,
|
||||
api.h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
m.InvalidTags = invalidTags
|
||||
m.ValidTags = validTags
|
||||
response[index] = m
|
||||
}
|
||||
|
||||
return &v1.ListMachinesResponse{Machines: response}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) MoveMachine(
|
||||
ctx context.Context,
|
||||
request *v1.MoveMachineRequest,
|
||||
) (*v1.MoveMachineResponse, error) {
|
||||
machine, err := api.h.GetMachineByID(request.GetMachineId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = api.h.SetMachineNamespace(machine, request.GetNamespace())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.MoveMachineResponse{Machine: machine.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) GetMachineRoute(
|
||||
ctx context.Context,
|
||||
request *v1.GetMachineRouteRequest,
|
||||
@@ -369,9 +480,15 @@ func (api headscaleV1APIServer) DebugCreateMachine(
|
||||
Hostname: "DebugTestMachine",
|
||||
}
|
||||
|
||||
givenName, err := api.h.GenerateGivenName(request.GetKey(), request.GetName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newMachine := Machine{
|
||||
MachineKey: request.GetKey(),
|
||||
Name: request.GetName(),
|
||||
Hostname: request.GetName(),
|
||||
GivenName: givenName,
|
||||
Namespace: *namespace,
|
||||
|
||||
Expiry: &time.Time{},
|
||||
@@ -380,9 +497,14 @@ func (api headscaleV1APIServer) DebugCreateMachine(
|
||||
|
||||
HostInfo: HostInfo(hostinfo),
|
||||
}
|
||||
nodeKey := key.NodePublic{}
|
||||
err = nodeKey.UnmarshalText([]byte(request.GetKey()))
|
||||
if err != nil {
|
||||
log.Panic().Msg("can not add machine for debug. invalid node key")
|
||||
}
|
||||
|
||||
api.h.registrationCache.Set(
|
||||
request.GetKey(),
|
||||
NodePublicKeyStripPrefix(nodeKey),
|
||||
newMachine,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
grpcv1_test.go (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
package headscale
|
||||
|
||||
import "testing"
|
||||
|
||||
func Test_validateTag(t *testing.T) {
|
||||
type args struct {
|
||||
tag string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid tag",
|
||||
args: args{tag: "tag:test"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "tag without tag prefix",
|
||||
args: args{tag: "test"},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "uppercase tag",
|
||||
args: args{tag: "tag:tEST"},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "tag that contains space",
|
||||
args: args{tag: "tag:this is a spaced tag"},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := validateTag(tt.args.tag); (err != nil) != tt.wantErr {
|
||||
t.Errorf("validateTag() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
handler_legacy.go (new file, 15 lines)
@@ -0,0 +1,15 @@
//go:build ts2019

package headscale

import (
	"net/http"

	"github.com/gorilla/mux"
)

func (h *Headscale) addLegacyHandlers(router *mux.Router) {
	router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
		Methods(http.MethodPost)
	router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
}
handler_placeholder.go (new file, 8 lines)
@@ -0,0 +1,8 @@
//go:build !ts2019

package headscale

import "github.com/gorilla/mux"

func (h *Headscale) addLegacyHandlers(router *mux.Router) {
}
integration/auth_web_flow_test.go (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var errParseAuthPage = errors.New("failed to parse auth page")
|
||||
|
||||
type AuthWebFlowScenario struct {
|
||||
*Scenario
|
||||
}
|
||||
|
||||
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
baseScenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
scenario := AuthWebFlowScenario{
|
||||
Scenario: baseScenario,
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) CreateHeadscaleEnv(namespaces map[string]int) error {
|
||||
err := s.StartHeadscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.Headscale().WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for namespaceName, clientCount := range namespaces {
|
||||
log.Printf("creating namespace %s with %d clients", namespaceName, clientCount)
|
||||
err = s.CreateNamespace(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.runTailscaleUp(namespaceName, s.Headscale().GetEndpoint())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) runTailscaleUp(
|
||||
namespaceStr, loginServer string,
|
||||
) error {
|
||||
log.Printf("running tailscale up for namespace %s", namespaceStr)
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.joinWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.joinWaitGroup.Done()
|
||||
|
||||
// TODO(juanfont): error handle this
|
||||
loginURL, err := c.UpWithLoginURL(loginServer)
|
||||
if err != nil {
|
||||
log.Printf("failed to run tailscale up: %s", err)
|
||||
}
|
||||
|
||||
err = s.runHeadscaleRegister(namespaceStr, loginURL)
|
||||
if err != nil {
|
||||
log.Printf("failed to register client: %s", err)
|
||||
}
|
||||
|
||||
err = c.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", c.Hostname(), err)
|
||||
}
|
||||
}(client)
|
||||
}
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) runHeadscaleRegister(namespaceStr string, loginURL *url.URL) error {
|
||||
log.Printf("loginURL: %s", loginURL)
|
||||
loginURL.Host = fmt.Sprintf("%s:8080", s.Headscale().GetIP())
|
||||
loginURL.Scheme = "http"
|
||||
|
||||
httpClient := &http.Client{}
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
// see api.go HTML template
|
||||
codeSep := strings.Split(string(body), "</code>")
|
||||
if len(codeSep) != 2 {
|
||||
return errParseAuthPage
|
||||
}
|
||||
|
||||
keySep := strings.Split(codeSep[0], "key ")
|
||||
if len(keySep) != 2 {
|
||||
return errParseAuthPage
|
||||
}
|
||||
key := keySep[1]
|
||||
log.Printf("registering node %s", key)
|
||||
|
||||
if headscale, ok := s.controlServers["headscale"]; ok {
|
||||
_, err = headscale.Execute([]string{"headscale", "-n", namespaceStr, "nodes", "register", "--key", key})
|
||||
if err != nil {
|
||||
log.Printf("failed to register node: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to find headscale: %w", errNoHeadscaleAvailable)
|
||||
}
|
||||
integration/cli_test.go (new file, 377 lines)
@@ -0,0 +1,377 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error {
|
||||
str, err := headscale.Execute(command)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(str), result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestNamespaceCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": 0,
|
||||
"namespace2": 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listNamespaces []v1.Namespace
|
||||
err = executeAndUnmarshal(scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listNamespaces,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
result := []string{listNamespaces[0].Name, listNamespaces[1].Name}
|
||||
sort.Strings(result)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{"namespace1", "namespace2"},
|
||||
result,
|
||||
)
|
||||
|
||||
_, err = scenario.Headscale().Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
"rename",
|
||||
"--output",
|
||||
"json",
|
||||
"namespace2",
|
||||
"newname",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listAfterRenameNamespaces []v1.Namespace
|
||||
err = executeAndUnmarshal(scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listAfterRenameNamespaces,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
result = []string{listAfterRenameNamespaces[0].Name, listAfterRenameNamespaces[1].Name}
|
||||
sort.Strings(result)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{"namespace1", "newname"},
|
||||
result,
|
||||
)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "preauthkeyspace"
|
||||
count := 3
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
keys := make([]*v1.PreAuthKey, count)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index := 0; index < count; index++ {
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err := executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--reusable",
|
||||
"--expiration",
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
"--tags",
|
||||
"tag:test1,tag:test2",
|
||||
},
|
||||
&preAuthKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
keys[index] = &preAuthKey
|
||||
}
|
||||
|
||||
assert.Len(t, keys, 3)
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeys,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// There is one key created by "scenario.CreateHeadscaleEnv"
|
||||
assert.Len(t, listedPreAuthKeys, 4)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{keys[0].Id, keys[1].Id, keys[2].Id},
|
||||
[]string{listedPreAuthKeys[1].Id, listedPreAuthKeys[2].Id, listedPreAuthKeys[3].Id},
|
||||
)
|
||||
|
||||
assert.NotEmpty(t, listedPreAuthKeys[1].Key)
|
||||
assert.NotEmpty(t, listedPreAuthKeys[2].Key)
|
||||
assert.NotEmpty(t, listedPreAuthKeys[3].Key)
|
||||
|
||||
assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(t, listedPreAuthKeys[2].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(t, listedPreAuthKeys[3].Expiration.AsTime().After(time.Now()))
|
||||
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[2].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[3].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
|
||||
for index := range listedPreAuthKeys {
|
||||
if index == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Equal(t, listedPreAuthKeys[index].AclTags, []string{"tag:test1", "tag:test2"})
|
||||
}
|
||||
|
||||
// Test key expiry
|
||||
_, err = scenario.Headscale().Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"expire",
|
||||
listedPreAuthKeys[1].Key,
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listedPreAuthKeysAfterExpire []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeysAfterExpire,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, listedPreAuthKeysAfterExpire[1].Expiration.AsTime().Before(time.Now()))
|
||||
assert.True(t, listedPreAuthKeysAfterExpire[2].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(t, listedPreAuthKeysAfterExpire[3].Expiration.AsTime().After(time.Now()))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "pre-auth-key-without-exp-namespace"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--reusable",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&preAuthKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeys,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// There is one key created by "scenario.CreateHeadscaleEnv"
|
||||
assert.Len(t, listedPreAuthKeys, 2)
|
||||
|
||||
assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Minute*70)),
|
||||
)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "pre-auth-key-reus-ephm-namespace"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthReusableKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--reusable=true",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&preAuthReusableKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthEphemeralKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--ephemeral=true",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&preAuthEphemeralKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, preAuthEphemeralKey.GetEphemeral())
|
||||
assert.False(t, preAuthEphemeralKey.GetReusable())
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeys,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// There is one key created by "scenario.CreateHeadscaleEnv"
|
||||
assert.Len(t, listedPreAuthKeys, 3)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
integration/control.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package integration

import (
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

type ControlServer interface {
	Shutdown() error
	Execute(command []string) (string, error)
	GetHealthEndpoint() string
	GetEndpoint() string
	WaitForReady() error
	CreateNamespace(namespace string) error
	CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
	ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
}
integration/dockertestutil/config.go (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
func IsRunningInContainer() bool {
|
||||
if _, err := os.Stat("/.dockerenv"); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func DockerRestartPolicy(config *docker.HostConfig) {
|
||||
// Setting AutoRemove to true would make a stopped container go away by itself on error *immediately*;
// when set to false, containers remain until the end of the integration test.
|
||||
config.AutoRemove = false
|
||||
config.RestartPolicy = docker.RestartPolicy{
|
||||
Name: "no",
|
||||
}
|
||||
}
|
||||
|
||||
func DockerAllowLocalIPv6(config *docker.HostConfig) {
|
||||
if config.Sysctls == nil {
|
||||
config.Sysctls = make(map[string]string, 1)
|
||||
}
|
||||
config.Sysctls["net.ipv6.conf.all.disable_ipv6"] = "0"
|
||||
}
|
||||
|
||||
func DockerAllowNetworkAdministration(config *docker.HostConfig) {
|
||||
config.CapAdd = append(config.CapAdd, "NET_ADMIN")
|
||||
config.Mounts = append(config.Mounts, docker.HostMount{
|
||||
Type: "bind",
|
||||
Source: "/dev/net/tun",
|
||||
Target: "/dev/net/tun",
|
||||
})
|
||||
}
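These option functions are meant to be passed as dockertest's variadic host-config arguments; a sketch of the intended composition from within this package (the image, tag and container name here are placeholders, not values used by the test suite):

// runDisposableContainer shows how the helpers above compose with dockertest.
func runDisposableContainer(pool *dockertest.Pool) (*dockertest.Resource, error) {
	return pool.RunWithOptions(
		&dockertest.RunOptions{
			Name:       "hs-example",
			Repository: "ubuntu",
			Tag:        "22.04",
			Cmd:        []string{"sleep", "infinity"},
		},
		DockerRestartPolicy,
		DockerAllowLocalIPv6,
		DockerAllowNetworkAdministration,
	)
}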
|
||||
integration/dockertestutil/execute.go (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
)
|
||||
|
||||
const dockerExecuteTimeout = time.Second * 10
|
||||
|
||||
var (
|
||||
ErrDockertestCommandFailed = errors.New("dockertest command failed")
|
||||
ErrDockertestCommandTimeout = errors.New("dockertest command timed out")
|
||||
)
|
||||
|
||||
type ExecuteCommandConfig struct {
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
type ExecuteCommandOption func(*ExecuteCommandConfig) error
|
||||
|
||||
func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
|
||||
return ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {
|
||||
conf.timeout = timeout
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func ExecuteCommand(
|
||||
resource *dockertest.Resource,
|
||||
cmd []string,
|
||||
env []string,
|
||||
options ...ExecuteCommandOption,
|
||||
) (string, string, error) {
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
execConfig := ExecuteCommandConfig{
|
||||
timeout: dockerExecuteTimeout,
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
if err := opt(&execConfig); err != nil {
|
||||
return "", "", fmt.Errorf("execute-command/options: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
type result struct {
|
||||
exitCode int
|
||||
err error
|
||||
}
|
||||
|
||||
resultChan := make(chan result, 1)
|
||||
|
||||
// Run the long-running function in its own goroutine and pass its
// response back into our channel.
|
||||
go func() {
|
||||
exitCode, err := resource.Exec(
|
||||
cmd,
|
||||
dockertest.ExecOptions{
|
||||
Env: append(env, "HEADSCALE_LOG_LEVEL=disabled"),
|
||||
StdOut: &stdout,
|
||||
StdErr: &stderr,
|
||||
},
|
||||
)
|
||||
resultChan <- result{exitCode, err}
|
||||
}()
|
||||
|
||||
// Listen on our channel AND a timeout channel - which ever happens first.
|
||||
select {
|
||||
case res := <-resultChan:
|
||||
if res.err != nil {
|
||||
return stdout.String(), stderr.String(), res.err
|
||||
}
|
||||
|
||||
if res.exitCode != 0 {
|
||||
// Uncomment for debugging
|
||||
// log.Println("Command: ", cmd)
|
||||
// log.Println("stdout: ", stdout.String())
|
||||
// log.Println("stderr: ", stderr.String())
|
||||
|
||||
return stdout.String(), stderr.String(), ErrDockertestCommandFailed
|
||||
}
|
||||
|
||||
return stdout.String(), stderr.String(), nil
|
||||
case <-time.After(execConfig.timeout):
|
||||
|
||||
return stdout.String(), stderr.String(), ErrDockertestCommandTimeout
|
||||
}
|
||||
}
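Stripped of the Docker specifics, the timeout handling above is the usual result-channel-plus-timer pattern; a generic sketch within this package, not part of this changeset:

// runWithTimeout races a unit of work against a timer, the same way
// ExecuteCommand does with resource.Exec.
func runWithTimeout(timeout time.Duration, work func() error) error {
	done := make(chan error, 1)

	go func() {
		done <- work()
	}()

	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return ErrDockertestCommandTimeout
	}
}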
|
||||
integration/dockertestutil/network.go (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
var ErrContainerNotFound = errors.New("container not found")
|
||||
|
||||
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (*dockertest.Network, error) {
|
||||
networks, err := pool.NetworksByName(name)
|
||||
if err != nil || len(networks) == 0 {
|
||||
if _, err := pool.CreateNetwork(name); err == nil {
|
||||
// Create does not give us an updated version of the resource, so we need to
|
||||
// get it again.
|
||||
networks, err := pool.NetworksByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &networks[0], nil
|
||||
}
|
||||
}
|
||||
|
||||
return &networks[0], nil
|
||||
}
|
||||
|
||||
func AddContainerToNetwork(
|
||||
pool *dockertest.Pool,
|
||||
network *dockertest.Network,
|
||||
testContainer string,
|
||||
) error {
|
||||
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{
|
||||
All: true,
|
||||
Filters: map[string][]string{
|
||||
"name": {testContainer},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pool.Client.ConnectNetwork(network.Network.ID, docker.NetworkConnectionOptions{
|
||||
Container: containers[0].ID,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(kradalby): This doesn't work reliably, but calling the exact same functions
// seems to work fine...
|
||||
// if container, ok := pool.ContainerByName("/" + testContainer); ok {
|
||||
// err := container.ConnectToNetwork(network)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
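A short sketch of how these two helpers fit together from inside this package; the network name is a placeholder, while "headscale-test-suite" matches the container name used by the scenario code later in this diff:

// attachTestSuite connects an already running test-suite container to a
// (possibly new) docker network.
func attachTestSuite() error {
	pool, err := dockertest.NewPool("")
	if err != nil {
		return err
	}

	network, err := GetFirstOrCreateNetwork(pool, "hs-example-net")
	if err != nil {
		return err
	}

	return AddContainerToNetwork(pool, network, "headscale-test-suite")
}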
|
||||
integration/general_test.go (new file, 340 lines)
@@ -0,0 +1,340 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func TestPingAllByIP(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingAllByHostname(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
// Omit 1.16.2 (-1) because it does not have the FQDN field
|
||||
"namespace3": len(TailscaleVersions) - 1,
|
||||
"namespace4": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, hostname := range allHostnames {
|
||||
err := client.Ping(hostname)
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", hostname, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTaildrop(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
retry := func(times int, sleepInterval time.Duration, doWork func() error) error {
|
||||
var err error
|
||||
for attempts := 0; attempts < times; attempts++ {
|
||||
err = doWork()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(sleepInterval)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
// Omit 1.16.2 (-1) because it does not have the FQDN field
|
||||
"taildrop": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
// This will essentially fetch and cache all the FQDNs
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())}
|
||||
|
||||
if _, _, err := client.Execute(command); err != nil {
|
||||
t.Errorf("failed to create taildrop file on %s, err: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
// It is safe to ignore this error as we handled it when caching it
|
||||
peerFQDN, _ := peer.FQDN()
|
||||
|
||||
t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
|
||||
command := []string{
|
||||
"tailscale", "file", "cp",
|
||||
fmt.Sprintf("/tmp/file_from_%s", client.Hostname()),
|
||||
fmt.Sprintf("%s:", peerFQDN),
|
||||
}
|
||||
|
||||
err := retry(10, 1*time.Second, func() error {
|
||||
t.Logf(
|
||||
"Sending file from %s to %s\n",
|
||||
client.Hostname(),
|
||||
peer.Hostname(),
|
||||
)
|
||||
_, _, err := client.Execute(command)
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to send taildrop file on %s, err: %s",
|
||||
client.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
command := []string{
|
||||
"tailscale", "file",
|
||||
"get",
|
||||
"/tmp/",
|
||||
}
|
||||
if _, _, err := client.Execute(command); err != nil {
|
||||
t.Errorf("failed to get taildrop file on %s, err: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
|
||||
command := []string{
|
||||
"ls",
|
||||
fmt.Sprintf("/tmp/file_from_%s", peer.Hostname()),
|
||||
}
|
||||
log.Printf(
|
||||
"Checking file in %s from %s\n",
|
||||
client.Hostname(),
|
||||
peer.Hostname(),
|
||||
)
|
||||
|
||||
result, _, err := client.Execute(command)
|
||||
if err != nil {
|
||||
t.Errorf("failed to execute command to ls taildrop: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("Result for %s: %s\n", peer.Hostname(), result)
|
||||
if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result {
|
||||
t.Errorf(
|
||||
"taildrop result is not correct %s, wanted %s",
|
||||
result,
|
||||
fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMagicDNS(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
// Omit 1.16.2 (-1) because it does not have the FQDN field
|
||||
"magicdns1": len(TailscaleVersions) - 1,
|
||||
"magicdns2": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
// Poor man's cache
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get IPs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
// It is safe to ignore this error as we handled it when caching it
|
||||
peerFQDN, _ := peer.FQDN()
|
||||
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"ip", peerFQDN,
|
||||
}
|
||||
result, _, err := client.Execute(command)
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to execute resolve/ip command %s from %s: %s",
|
||||
peerFQDN,
|
||||
client.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
ips, err := peer.IPs()
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to get ips for %s: %s",
|
||||
peer.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
if !strings.Contains(result, ip.String()) {
|
||||
t.Errorf("ip %s is not found in \n%s\n", ip.String(), result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
integration/hsic/config.go (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
package hsic
|
||||
|
||||
// const (
|
||||
// defaultEphemeralNodeInactivityTimeout = time.Second * 30
|
||||
// defaultNodeUpdateCheckInterval = time.Second * 10
|
||||
// )
|
||||
|
||||
// TODO(kradalby): This approach doesn't work because we cannot
// serialise our config object to YAML or JSON.
|
||||
// func DefaultConfig() headscale.Config {
|
||||
// derpMap, _ := url.Parse("https://controlplane.tailscale.com/derpmap/default")
|
||||
//
|
||||
// config := headscale.Config{
|
||||
// Log: headscale.LogConfig{
|
||||
// Level: zerolog.TraceLevel,
|
||||
// },
|
||||
// ACL: headscale.GetACLConfig(),
|
||||
// DBtype: "sqlite3",
|
||||
// EphemeralNodeInactivityTimeout: defaultEphemeralNodeInactivityTimeout,
|
||||
// NodeUpdateCheckInterval: defaultNodeUpdateCheckInterval,
|
||||
// IPPrefixes: []netip.Prefix{
|
||||
// netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
|
||||
// netip.MustParsePrefix("100.64.0.0/10"),
|
||||
// },
|
||||
// DNSConfig: &tailcfg.DNSConfig{
|
||||
// Proxied: true,
|
||||
// Nameservers: []netip.Addr{
|
||||
// netip.MustParseAddr("127.0.0.11"),
|
||||
// netip.MustParseAddr("1.1.1.1"),
|
||||
// },
|
||||
// Resolvers: []*dnstype.Resolver{
|
||||
// {
|
||||
// Addr: "127.0.0.11",
|
||||
// },
|
||||
// {
|
||||
// Addr: "1.1.1.1",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// BaseDomain: "headscale.net",
|
||||
//
|
||||
// DBpath: "/tmp/integration_test_db.sqlite3",
|
||||
//
|
||||
// PrivateKeyPath: "/tmp/integration_private.key",
|
||||
// NoisePrivateKeyPath: "/tmp/noise_integration_private.key",
|
||||
// Addr: "0.0.0.0:8080",
|
||||
// MetricsAddr: "127.0.0.1:9090",
|
||||
// ServerURL: "http://headscale:8080",
|
||||
//
|
||||
// DERP: headscale.DERPConfig{
|
||||
// URLs: []url.URL{
|
||||
// *derpMap,
|
||||
// },
|
||||
// AutoUpdate: false,
|
||||
// UpdateFrequency: 1 * time.Minute,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// return config
|
||||
// }
|
||||
|
||||
// TODO: Reuse the actual configuration object above.
|
||||
func DefaultConfigYAML() string {
|
||||
yaml := `
|
||||
log:
|
||||
level: trace
|
||||
acl_policy_path: ""
|
||||
db_type: sqlite3
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
node_update_check_interval: 10s
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
dns_config:
|
||||
base_domain: headscale.net
|
||||
magic_dns: true
|
||||
domains: []
|
||||
nameservers:
|
||||
- 127.0.0.11
|
||||
- 1.1.1.1
|
||||
private_key_path: /tmp/private.key
|
||||
noise:
|
||||
private_key_path: /tmp/noise_private.key
|
||||
listen_addr: 0.0.0.0:8080
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
server_url: http://headscale:8080
|
||||
|
||||
derp:
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
auto_update_enabled: false
|
||||
update_frequency: 1m
|
||||
`
|
||||
|
||||
return yaml
|
||||
}
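The YAML above is only the baseline written into the container; per-test overrides then go through WithConfigEnv as HEADSCALE_* environment variables, the same mechanism this diff already uses for HEADSCALE_ACL_POLICY_PATH and HEADSCALE_LOG_LEVEL. A sketch with a placeholder override:

// newDebugHeadscale combines the baseline YAML with a per-test environment
// override. Pool and network come from the calling scenario; 8080 mirrors the
// port used by the integration scenario.
func newDebugHeadscale(pool *dockertest.Pool, network *dockertest.Network) (*HeadscaleInContainer, error) {
	return New(pool, 8080, network,
		WithConfigEnv(map[string]string{
			// Placeholder override for illustration only.
			"HEADSCALE_LOG_LEVEL": "debug",
		}),
	)
}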
|
||||
integration/hsic/hsic.go (new file, 346 lines)
@@ -0,0 +1,346 @@
|
||||
package hsic
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
const (
|
||||
hsicHashLength = 6
|
||||
dockerContextPath = "../."
|
||||
aclPolicyPath = "/etc/headscale/acl.hujson"
|
||||
)
|
||||
|
||||
var errHeadscaleStatusCodeNotOk = errors.New("headscale status code not ok")
|
||||
|
||||
type HeadscaleInContainer struct {
|
||||
hostname string
|
||||
port int
|
||||
|
||||
pool *dockertest.Pool
|
||||
container *dockertest.Resource
|
||||
network *dockertest.Network
|
||||
|
||||
// optional config
|
||||
aclPolicy *headscale.ACLPolicy
|
||||
env []string
|
||||
}
|
||||
|
||||
type Option = func(c *HeadscaleInContainer)
|
||||
|
||||
func WithACLPolicy(acl *headscale.ACLPolicy) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hsic.aclPolicy = acl
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigEnv(configEnv map[string]string) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
env := []string{}
|
||||
|
||||
for key, value := range configEnv {
|
||||
env = append(env, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
|
||||
hsic.env = env
|
||||
}
|
||||
}
|
||||
|
||||
func New(
|
||||
pool *dockertest.Pool,
|
||||
port int,
|
||||
network *dockertest.Network,
|
||||
opts ...Option,
|
||||
) (*HeadscaleInContainer, error) {
|
||||
hash, err := headscale.GenerateRandomStringDNSSafe(hsicHashLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostname := fmt.Sprintf("hs-%s", hash)
|
||||
portProto := fmt.Sprintf("%d/tcp", port)
|
||||
|
||||
hsic := &HeadscaleInContainer{
|
||||
hostname: hostname,
|
||||
port: port,
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(hsic)
|
||||
}
|
||||
|
||||
if hsic.aclPolicy != nil {
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
ContextDir: dockerContextPath,
|
||||
}
|
||||
|
||||
runOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
ExposedPorts: []string{portProto},
|
||||
Networks: []*dockertest.Network{network},
|
||||
// Cmd: []string{"headscale", "serve"},
|
||||
// TODO(kradalby): Get rid of this hack; we currently need to give ourselves some time
// to inject the headscale configuration further down.
|
||||
Entrypoint: []string{"/bin/bash", "-c", "/bin/sleep 3 ; headscale serve"},
|
||||
Env: hsic.env,
|
||||
}
|
||||
|
||||
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
|
||||
err = pool.RemoveContainerByName(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
container, err := pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
runOptions,
|
||||
dockertestutil.DockerRestartPolicy,
|
||||
dockertestutil.DockerAllowLocalIPv6,
|
||||
dockertestutil.DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not start headscale container: %w", err)
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
hsic.container = container
|
||||
|
||||
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(DefaultConfigYAML()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write headscale config to container: %w", err)
|
||||
}
|
||||
|
||||
if hsic.aclPolicy != nil {
|
||||
data, err := json.Marshal(hsic.aclPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal ACL Policy to JSON: %w", err)
|
||||
}
|
||||
|
||||
err = hsic.WriteFile(aclPolicyPath, data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write ACL policy to container: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return hsic, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) Shutdown() error {
|
||||
return t.pool.Purge(t.container)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) Execute(
|
||||
command []string,
|
||||
) (string, error) {
|
||||
log.Println("command", command)
|
||||
log.Printf("running command for %s\n", t.hostname)
|
||||
stdout, stderr, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
log.Printf("command stderr: %s\n", stderr)
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
if stdout != "" {
|
||||
log.Printf("command stdout: %s\n", stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetIP() string {
|
||||
return t.container.GetIPInNetwork(t.network)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetPort() string {
|
||||
return fmt.Sprintf("%d", t.port)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetHealthEndpoint() string {
|
||||
hostEndpoint := fmt.Sprintf("%s:%d",
|
||||
t.GetIP(),
|
||||
t.port)
|
||||
|
||||
return fmt.Sprintf("http://%s/health", hostEndpoint)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetEndpoint() string {
|
||||
hostEndpoint := fmt.Sprintf("%s:%d",
|
||||
t.GetIP(),
|
||||
t.port)
|
||||
|
||||
return fmt.Sprintf("http://%s", hostEndpoint)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) WaitForReady() error {
|
||||
url := t.GetHealthEndpoint()
|
||||
|
||||
log.Printf("waiting for headscale to be ready at %s", url)
|
||||
|
||||
return t.pool.Retry(func() error {
|
||||
resp, err := http.Get(url) //nolint
|
||||
if err != nil {
|
||||
return fmt.Errorf("headscale is not ready: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return errHeadscaleStatusCodeNotOk
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) CreateNamespace(
|
||||
namespace string,
|
||||
) error {
|
||||
command := []string{"headscale", "namespaces", "create", namespace}
|
||||
|
||||
_, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) CreateAuthKey(
|
||||
namespace string,
|
||||
) (*v1.PreAuthKey, error) {
|
||||
command := []string{
|
||||
"headscale",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"preauthkeys",
|
||||
"create",
|
||||
"--reusable",
|
||||
"--expiration",
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
}
|
||||
|
||||
result, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute create auth key command: %w", err)
|
||||
}
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = json.Unmarshal([]byte(result), &preAuthKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)
|
||||
}
|
||||
|
||||
return &preAuthKey, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) ListMachinesInNamespace(
|
||||
namespace string,
|
||||
) ([]*v1.Machine, error) {
|
||||
command := []string{"headscale", "--namespace", namespace, "nodes", "list", "--output", "json"}
|
||||
|
||||
result, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute list node command: %w", err)
|
||||
}
|
||||
|
||||
var nodes []*v1.Machine
|
||||
err = json.Unmarshal([]byte(result), &nodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal nodes: %w", err)
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) WriteFile(path string, data []byte) error {
|
||||
dirPath, fileName := filepath.Split(path)
|
||||
|
||||
file := bytes.NewReader(data)
|
||||
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
|
||||
tarWriter := tar.NewWriter(buf)
|
||||
|
||||
header := &tar.Header{
|
||||
Name: fileName,
|
||||
Size: file.Size(),
|
||||
// Mode: int64(stat.Mode()),
|
||||
// ModTime: stat.ModTime(),
|
||||
}
|
||||
|
||||
err := tarWriter.WriteHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed write file header to tar: %w", err)
|
||||
}
|
||||
|
||||
_, err = io.Copy(tarWriter, file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy file to tar: %w", err)
|
||||
}
|
||||
|
||||
err = tarWriter.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close tar: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("tar: %s", buf.String())
|
||||
|
||||
// Ensure the directory is present inside the container
|
||||
_, err = t.Execute([]string{"mkdir", "-p", dirPath})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure directory: %w", err)
|
||||
}
|
||||
|
||||
err = t.pool.Client.UploadToContainer(
|
||||
t.container.Container.ID,
|
||||
docker.UploadToContainerOptions{
|
||||
NoOverwriteDirNonDir: false,
|
||||
Path: dirPath,
|
||||
InputStream: bytes.NewReader(buf.Bytes()),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
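The tar wrapping exists because Docker's upload API only accepts tar streams, not bare files. Usage is then a one-liner; derp.yaml and derpYAML here are hypothetical examples, not files this changeset creates:

// pushDerpConfig is a hypothetical usage example: hs is a running
// *HeadscaleInContainer and derpYAML is a []byte holding an extra config fragment.
func pushDerpConfig(hs *HeadscaleInContainer, derpYAML []byte) error {
	return hs.WriteFile("/etc/headscale/derp.yaml", derpYAML)
}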
|
||||
integration/scenario.go (new file, 425 lines)
@@ -0,0 +1,425 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/ory/dockertest/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
scenarioHashLength = 6
|
||||
maxWait = 60 * time.Second
|
||||
headscalePort = 8080
|
||||
)
|
||||
|
||||
var (
|
||||
errNoHeadscaleAvailable = errors.New("no headscale available")
|
||||
errNoNamespaceAvailable = errors.New("no namespace available")
|
||||
TailscaleVersions = []string{
|
||||
"head",
|
||||
"unstable",
|
||||
"1.32.1",
|
||||
"1.30.2",
|
||||
"1.28.0",
|
||||
"1.26.2",
|
||||
"1.24.2",
|
||||
"1.22.2",
|
||||
"1.20.4",
|
||||
"1.18.2",
|
||||
"1.16.2",
|
||||
|
||||
// These versions seem to fail when fetching from apt.
|
||||
// "1.14.6",
|
||||
// "1.12.4",
|
||||
// "1.10.2",
|
||||
// "1.8.7",
|
||||
}
|
||||
)
|
||||
|
||||
type Namespace struct {
|
||||
Clients map[string]TailscaleClient
|
||||
|
||||
createWaitGroup sync.WaitGroup
|
||||
joinWaitGroup sync.WaitGroup
|
||||
syncWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
// TODO(kradalby): make control server configurable, test correctness with Tailscale SaaS.
|
||||
type Scenario struct {
|
||||
// TODO(kradalby): support multiple headscales later; currently we only
// use one.
|
||||
controlServers map[string]ControlServer
|
||||
|
||||
namespaces map[string]*Namespace
|
||||
|
||||
pool *dockertest.Pool
|
||||
network *dockertest.Network
|
||||
}
|
||||
|
||||
func NewScenario() (*Scenario, error) {
|
||||
hash, err := headscale.GenerateRandomStringDNSSafe(scenarioHashLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pool, err := dockertest.NewPool("")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not connect to docker: %w", err)
|
||||
}
|
||||
|
||||
pool.MaxWait = maxWait
|
||||
|
||||
networkName := fmt.Sprintf("hs-%s", hash)
|
||||
if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" {
|
||||
networkName = overrideNetworkName
|
||||
}
|
||||
|
||||
network, err := dockertestutil.GetFirstOrCreateNetwork(pool, networkName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create or get network: %w", err)
|
||||
}
|
||||
|
||||
// We run the test suite in a docker container that calls a couple of endpoints for
// readiness checks; this ensures that we can run the tests with individual networks
// and have the client reach the different containers.
|
||||
err = dockertestutil.AddContainerToNetwork(pool, network, "headscale-test-suite")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
|
||||
}
|
||||
|
||||
return &Scenario{
|
||||
controlServers: make(map[string]ControlServer),
|
||||
namespaces: make(map[string]*Namespace),
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Scenario) Shutdown() error {
|
||||
for _, control := range s.controlServers {
|
||||
err := control.Shutdown()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to tear down control: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for namespaceName, namespace := range s.namespaces {
|
||||
for _, client := range namespace.Clients {
|
||||
log.Printf("removing client %s in namespace %s", client.Hostname(), namespaceName)
|
||||
err := client.Shutdown()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to tear down client: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.RemoveNetwork(s.network); err != nil {
|
||||
return fmt.Errorf("failed to remove network: %w", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby): This seems redundant to the previous call
|
||||
// if err := s.network.Close(); err != nil {
|
||||
// return fmt.Errorf("failed to tear down network: %w", err)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scenario) Namespaces() []string {
|
||||
namespaces := make([]string, 0)
|
||||
for namespace := range s.namespaces {
|
||||
namespaces = append(namespaces, namespace)
|
||||
}
|
||||
|
||||
return namespaces
|
||||
}
|
||||
|
||||
/// Headscale related stuff
|
||||
// Note: These functions assume that there is a _single_ headscale instance for now
|
||||
|
||||
// TODO(kradalby): make port and headscale configurable, multiple instances support?
|
||||
func (s *Scenario) StartHeadscale() error {
|
||||
headscale, err := hsic.New(s.pool, headscalePort, s.network,
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create headscale container: %w", err)
|
||||
}
|
||||
|
||||
s.controlServers["headscale"] = headscale
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scenario) Headscale() *hsic.HeadscaleInContainer {
|
||||
//nolint
|
||||
return s.controlServers["headscale"].(*hsic.HeadscaleInContainer)
|
||||
}
|
||||
|
||||
func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
|
||||
if headscale, ok := s.controlServers["headscale"]; ok {
|
||||
key, err := headscale.CreateAuthKey(namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create namespace: %w", err)
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to create namespace: %w", errNoHeadscaleAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) CreateNamespace(namespace string) error {
|
||||
if headscale, ok := s.controlServers["headscale"]; ok {
|
||||
err := headscale.CreateNamespace(namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create namespace: %w", err)
|
||||
}
|
||||
|
||||
s.namespaces[namespace] = &Namespace{
|
||||
Clients: make(map[string]TailscaleClient),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to create namespace: %w", errNoHeadscaleAvailable)
|
||||
}
|
||||
|
||||
/// Client related stuff
|
||||
|
||||
func (s *Scenario) CreateTailscaleNodesInNamespace(
|
||||
namespaceStr string,
|
||||
requestedVersion string,
|
||||
count int,
|
||||
) error {
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for i := 0; i < count; i++ {
|
||||
version := requestedVersion
|
||||
if requestedVersion == "all" {
|
||||
version = TailscaleVersions[i%len(TailscaleVersions)]
|
||||
}
|
||||
|
||||
namespace.createWaitGroup.Add(1)
|
||||
|
||||
go func() {
|
||||
defer namespace.createWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
tsClient, err := tsic.New(s.pool, version, s.network)
|
||||
if err != nil {
|
||||
// return fmt.Errorf("failed to add tailscale node: %w", err)
|
||||
log.Printf("failed to add tailscale node: %s", err)
|
||||
}
|
||||
|
||||
namespace.Clients[tsClient.Hostname()] = tsClient
|
||||
}()
|
||||
}
|
||||
namespace.createWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to add tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) RunTailscaleUp(
|
||||
namespaceStr, loginServer, authKey string,
|
||||
) error {
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.joinWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.joinWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
_ = c.Up(loginServer, authKey)
|
||||
}(client)
|
||||
}
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) CountTailscale() int {
|
||||
count := 0
|
||||
|
||||
for _, namespace := range s.namespaces {
|
||||
count += len(namespace.Clients)
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (s *Scenario) WaitForTailscaleSync() error {
|
||||
tsCount := s.CountTailscale()
|
||||
|
||||
for _, namespace := range s.namespaces {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.syncWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.syncWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
_ = c.WaitForPeers(tsCount)
|
||||
}(client)
|
||||
}
|
||||
namespace.syncWaitGroup.Wait()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateHeadscaleEnv is a convenient method returning a set up Headscale
// test environment with nodes of all versions, joined to the server across
// the given namespaces.
|
||||
func (s *Scenario) CreateHeadscaleEnv(namespaces map[string]int) error {
|
||||
err := s.StartHeadscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.Headscale().WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for namespaceName, clientCount := range namespaces {
|
||||
err = s.CreateNamespace(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key, err := s.CreatePreAuthKey(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.RunTailscaleUp(namespaceName, s.Headscale().GetEndpoint(), key.GetKey())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
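
// Hypothetical usage sketch (not part of this change): a test could bring up a
// complete environment in a single call. The namespace names and node counts
// below are examples only; error handling is reduced to t.Fatalf.
//
//    func TestEnvironmentSketch(t *testing.T) {
//        scenario, err := NewScenario()
//        if err != nil {
//            t.Fatalf("failed to create scenario: %s", err)
//        }
//        defer scenario.Shutdown()
//
//        err = scenario.CreateHeadscaleEnv(map[string]int{
//            "namespace-one": 2,
//            "namespace-two": 1,
//        })
//        if err != nil {
//            t.Fatalf("failed to create headscale environment: %s", err)
//        }
//    }
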
func (s *Scenario) GetIPs(namespace string) ([]netip.Addr, error) {
    var ips []netip.Addr
    if ns, ok := s.namespaces[namespace]; ok {
        for _, client := range ns.Clients {
            clientIps, err := client.IPs()
            if err != nil {
                return ips, fmt.Errorf("failed to get ips: %w", err)
            }
            ips = append(ips, clientIps...)
        }

        return ips, nil
    }

    return ips, fmt.Errorf("failed to get ips: %w", errNoNamespaceAvailable)
}

func (s *Scenario) GetClients(namespace string) ([]TailscaleClient, error) {
    var clients []TailscaleClient
    if ns, ok := s.namespaces[namespace]; ok {
        for _, client := range ns.Clients {
            clients = append(clients, client)
        }

        return clients, nil
    }

    return clients, fmt.Errorf("failed to get clients: %w", errNoNamespaceAvailable)
}

func (s *Scenario) ListTailscaleClients(namespaces ...string) ([]TailscaleClient, error) {
    var allClients []TailscaleClient

    if len(namespaces) == 0 {
        namespaces = s.Namespaces()
    }

    for _, namespace := range namespaces {
        clients, err := s.GetClients(namespace)
        if err != nil {
            return nil, err
        }

        allClients = append(allClients, clients...)
    }

    return allClients, nil
}

func (s *Scenario) ListTailscaleClientsIPs(namespaces ...string) ([]netip.Addr, error) {
    var allIps []netip.Addr

    if len(namespaces) == 0 {
        namespaces = s.Namespaces()
    }

    for _, namespace := range namespaces {
        ips, err := s.GetIPs(namespace)
        if err != nil {
            return nil, err
        }

        allIps = append(allIps, ips...)
    }

    return allIps, nil
}

func (s *Scenario) ListTailscaleClientsFQDNs(namespaces ...string) ([]string, error) {
    allFQDNs := make([]string, 0)

    clients, err := s.ListTailscaleClients(namespaces...)
    if err != nil {
        return nil, err
    }

    for _, client := range clients {
        fqdn, err := client.FQDN()
        if err != nil {
            return nil, err
        }

        allFQDNs = append(allFQDNs, fqdn)
    }

    return allFQDNs, nil
}
integration/scenario_test.go (new file, 181 lines)
@@ -0,0 +1,181 @@
package integration

import (
    "testing"

    "github.com/juanfont/headscale/integration/dockertestutil"
)

// This file is intended to "test the test framework"; by proxy it will also test
// some Headscale/Tailscale stuff, but mostly in very simple ways.

func IntegrationSkip(t *testing.T) {
    t.Helper()

    if !dockertestutil.IsRunningInContainer() {
        t.Skip("not running in docker, skipping")
    }

    if testing.Short() {
        t.Skip("skipping integration tests due to short flag")
    }
}

func TestHeadscale(t *testing.T) {
    IntegrationSkip(t)

    var err error

    namespace := "test-space"

    scenario, err := NewScenario()
    if err != nil {
        t.Errorf("failed to create scenario: %s", err)
    }

    t.Run("start-headscale", func(t *testing.T) {
        err = scenario.StartHeadscale()
        if err != nil {
            t.Errorf("failed to start headscale: %s", err)
        }

        err = scenario.Headscale().WaitForReady()
        if err != nil {
            t.Errorf("headscale failed to become ready: %s", err)
        }
    })

    t.Run("create-namespace", func(t *testing.T) {
        err := scenario.CreateNamespace(namespace)
        if err != nil {
            t.Errorf("failed to create namespace: %s", err)
        }

        if _, ok := scenario.namespaces[namespace]; !ok {
            t.Errorf("namespace is not in scenario")
        }
    })

    t.Run("create-auth-key", func(t *testing.T) {
        _, err := scenario.CreatePreAuthKey(namespace)
        if err != nil {
            t.Errorf("failed to create preauthkey: %s", err)
        }
    })

    err = scenario.Shutdown()
    if err != nil {
        t.Errorf("failed to tear down scenario: %s", err)
    }
}

func TestCreateTailscale(t *testing.T) {
    IntegrationSkip(t)

    namespace := "only-create-containers"

    scenario, err := NewScenario()
    if err != nil {
        t.Errorf("failed to create scenario: %s", err)
    }

    scenario.namespaces[namespace] = &Namespace{
        Clients: make(map[string]TailscaleClient),
    }

    t.Run("create-tailscale", func(t *testing.T) {
        err := scenario.CreateTailscaleNodesInNamespace(namespace, "all", 3)
        if err != nil {
            t.Errorf("failed to add tailscale nodes: %s", err)
        }

        if clients := len(scenario.namespaces[namespace].Clients); clients != 3 {
            t.Errorf("wrong number of tailscale clients: %d != %d", clients, 3)
        }

        // TODO(kradalby): Test "all" version logic
    })

    err = scenario.Shutdown()
    if err != nil {
        t.Errorf("failed to tear down scenario: %s", err)
    }
}

func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
    IntegrationSkip(t)

    var err error

    namespace := "join-node-test"

    count := 1

    scenario, err := NewScenario()
    if err != nil {
        t.Errorf("failed to create scenario: %s", err)
    }

    t.Run("start-headscale", func(t *testing.T) {
        err = scenario.StartHeadscale()
        if err != nil {
            t.Errorf("failed to start headscale: %s", err)
        }

        headscale := scenario.Headscale()
        err = headscale.WaitForReady()
        if err != nil {
            t.Errorf("headscale failed to become ready: %s", err)
        }
    })

    t.Run("create-namespace", func(t *testing.T) {
        err := scenario.CreateNamespace(namespace)
        if err != nil {
            t.Errorf("failed to create namespace: %s", err)
        }

        if _, ok := scenario.namespaces[namespace]; !ok {
            t.Errorf("namespace is not in scenario")
        }
    })

    t.Run("create-tailscale", func(t *testing.T) {
        err := scenario.CreateTailscaleNodesInNamespace(namespace, "1.30.2", count)
        if err != nil {
            t.Errorf("failed to add tailscale nodes: %s", err)
        }

        if clients := len(scenario.namespaces[namespace].Clients); clients != count {
            t.Errorf("wrong number of tailscale clients: %d != %d", clients, count)
        }
    })

    t.Run("join-headscale", func(t *testing.T) {
        key, err := scenario.CreatePreAuthKey(namespace)
        if err != nil {
            t.Errorf("failed to create preauthkey: %s", err)
        }

        err = scenario.RunTailscaleUp(namespace, scenario.Headscale().GetEndpoint(), key.GetKey())
        if err != nil {
            t.Errorf("failed to login: %s", err)
        }
    })

    t.Run("get-ips", func(t *testing.T) {
        ips, err := scenario.GetIPs(namespace)
        if err != nil {
            t.Errorf("failed to get tailscale ips: %s", err)
        }

        if len(ips) != count*2 {
            t.Errorf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2)
        }
    })

    err = scenario.Shutdown()
    if err != nil {
        t.Errorf("failed to tear down scenario: %s", err)
    }
}
integration/tailscale.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package integration

import (
    "net/netip"
    "net/url"

    "tailscale.com/ipn/ipnstate"
)

type TailscaleClient interface {
    Hostname() string
    Shutdown() error
    Version() string
    Execute(command []string) (string, string, error)
    Up(loginServer, authKey string) error
    UpWithLoginURL(loginServer string) (*url.URL, error)
    IPs() ([]netip.Addr, error)
    FQDN() (string, error)
    Status() (*ipnstate.Status, error)
    WaitForReady() error
    WaitForPeers(expected int) error
    Ping(hostnameOrIP string) error
}
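
// Hypothetical usage sketch (not part of this change): any implementation of the
// interface, such as *tsic.TailscaleInContainer, can be driven the same way from
// a test. The tsClient variable and the t *testing.T are examples only.
//
//    var client TailscaleClient = tsClient
//    if err := client.WaitForReady(); err != nil {
//        t.Fatalf("client never became ready: %s", err)
//    }
//    status, err := client.Status()
//    if err != nil {
//        t.Fatalf("failed to get status: %s", err)
//    }
//    t.Logf("%s (%s) sees %d peers", client.Hostname(), client.Version(), len(status.Peers()))
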
integration/tsic/tsic.go (new file, 359 lines)
@@ -0,0 +1,359 @@
package tsic

import (
    "encoding/json"
    "errors"
    "fmt"
    "log"
    "net/netip"
    "net/url"
    "strings"

    "github.com/cenkalti/backoff/v4"
    "github.com/juanfont/headscale"
    "github.com/juanfont/headscale/integration/dockertestutil"
    "github.com/ory/dockertest/v3"
    "github.com/ory/dockertest/v3/docker"
    "tailscale.com/ipn/ipnstate"
)

const (
    tsicHashLength    = 6
    dockerContextPath = "../."
)

var (
    errTailscalePingFailed     = errors.New("ping failed")
    errTailscaleNotLoggedIn    = errors.New("tailscale not logged in")
    errTailscaleWrongPeerCount = errors.New("wrong peer count")
    errTailscaleNotConnected   = errors.New("tailscale not connected")
)

type TailscaleInContainer struct {
    version  string
    hostname string

    pool      *dockertest.Pool
    container *dockertest.Resource
    network   *dockertest.Network

    // "cache"
    ips  []netip.Addr
    fqdn string
}

func New(
    pool *dockertest.Pool,
    version string,
    network *dockertest.Network,
) (*TailscaleInContainer, error) {
    hash, err := headscale.GenerateRandomStringDNSSafe(tsicHashLength)
    if err != nil {
        return nil, err
    }

    hostname := fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)

    // TODO(kradalby): figure out why we need to "refresh" the network here.
    // network, err = dockertestutil.GetFirstOrCreateNetwork(pool, network.Network.Name)
    // if err != nil {
    //     return nil, err
    // }

    tailscaleOptions := &dockertest.RunOptions{
        Name:     hostname,
        Networks: []*dockertest.Network{network},
        Cmd: []string{
            "tailscaled", "--tun=tsdev",
        },
    }

    // dockertest isn't very good at handling containers that have already
    // been created; this is an attempt to make sure this container isn't
    // present.
    err = pool.RemoveContainerByName(hostname)
    if err != nil {
        return nil, err
    }

    container, err := pool.BuildAndRunWithBuildOptions(
        createTailscaleBuildOptions(version),
        tailscaleOptions,
        dockertestutil.DockerRestartPolicy,
        dockertestutil.DockerAllowLocalIPv6,
        dockertestutil.DockerAllowNetworkAdministration,
    )
    if err != nil {
        return nil, fmt.Errorf("could not start tailscale container: %w", err)
    }
    log.Printf("Created %s container\n", hostname)

    return &TailscaleInContainer{
        version:  version,
        hostname: hostname,

        pool:      pool,
        container: container,
        network:   network,
    }, nil
}

func (t *TailscaleInContainer) Shutdown() error {
    return t.pool.Purge(t.container)
}

func (t *TailscaleInContainer) Hostname() string {
    return t.hostname
}

func (t *TailscaleInContainer) Version() string {
    return t.version
}

func (t *TailscaleInContainer) Execute(
    command []string,
) (string, string, error) {
    log.Println("command", command)
    log.Printf("running command for %s\n", t.hostname)
    stdout, stderr, err := dockertestutil.ExecuteCommand(
        t.container,
        command,
        []string{},
    )
    if err != nil {
        log.Printf("command stderr: %s\n", stderr)

        if stdout != "" {
            log.Printf("command stdout: %s\n", stdout)
        }

        if strings.Contains(stderr, "NeedsLogin") {
            return stdout, stderr, errTailscaleNotLoggedIn
        }

        return stdout, stderr, err
    }

    return stdout, stderr, nil
}

func (t *TailscaleInContainer) Up(
    loginServer, authKey string,
) error {
    command := []string{
        "tailscale",
        "up",
        "-login-server",
        loginServer,
        "--authkey",
        authKey,
        "--hostname",
        t.hostname,
    }

    if _, _, err := t.Execute(command); err != nil {
        return fmt.Errorf("failed to join tailscale client: %w", err)
    }

    return nil
}

func (t *TailscaleInContainer) UpWithLoginURL(
    loginServer string,
) (*url.URL, error) {
    command := []string{
        "tailscale",
        "up",
        "-login-server",
        loginServer,
        "--hostname",
        t.hostname,
    }

    _, stderr, _ := t.Execute(command)

    urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
    urlStr = strings.TrimSpace(urlStr)

    // parse URL
    loginURL, err := url.Parse(urlStr)
    if err != nil {
        log.Printf("Could not parse login URL: %s", err)
        log.Printf("Original join command result: %s", stderr)

        return nil, err
    }

    return loginURL, nil
}

func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
    if t.ips != nil && len(t.ips) != 0 {
        return t.ips, nil
    }

    ips := make([]netip.Addr, 0)

    command := []string{
        "tailscale",
        "ip",
    }

    result, _, err := t.Execute(command)
    if err != nil {
        return []netip.Addr{}, fmt.Errorf("failed to join tailscale client: %w", err)
    }

    for _, address := range strings.Split(result, "\n") {
        address = strings.TrimSuffix(address, "\n")
        if len(address) < 1 {
            continue
        }
        ip, err := netip.ParseAddr(address)
        if err != nil {
            return nil, err
        }
        ips = append(ips, ip)
    }

    return ips, nil
}

func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
    command := []string{
        "tailscale",
        "status",
        "--json",
    }

    result, _, err := t.Execute(command)
    if err != nil {
        return nil, fmt.Errorf("failed to execute tailscale status command: %w", err)
    }

    var status ipnstate.Status
    err = json.Unmarshal([]byte(result), &status)
    if err != nil {
        return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err)
    }

    return &status, err
}

func (t *TailscaleInContainer) FQDN() (string, error) {
    if t.fqdn != "" {
        return t.fqdn, nil
    }

    status, err := t.Status()
    if err != nil {
        return "", fmt.Errorf("failed to get FQDN: %w", err)
    }

    return status.Self.DNSName, nil
}

func (t *TailscaleInContainer) WaitForReady() error {
    return t.pool.Retry(func() error {
        status, err := t.Status()
        if err != nil {
            return fmt.Errorf("failed to fetch tailscale status: %w", err)
        }

        if status.CurrentTailnet != nil {
            return nil
        }

        return errTailscaleNotConnected
    })
}

func (t *TailscaleInContainer) WaitForPeers(expected int) error {
    return t.pool.Retry(func() error {
        status, err := t.Status()
        if err != nil {
            return fmt.Errorf("failed to fetch tailscale status: %w", err)
        }

        if peers := status.Peers(); len(peers) != expected {
            return errTailscaleWrongPeerCount
        }

        return nil
    })
}

// TODO(kradalby): Make multiping, go routine magic.
func (t *TailscaleInContainer) Ping(hostnameOrIP string) error {
    return t.pool.Retry(func() error {
        command := []string{
            "tailscale", "ping",
            "--timeout=1s",
            "--c=10",
            "--until-direct=true",
            hostnameOrIP,
        }

        result, _, err := t.Execute(command)
        if err != nil {
            log.Printf(
                "failed to run ping command from %s to %s, err: %s",
                t.Hostname(),
                hostnameOrIP,
                err,
            )

            return err
        }

        if !strings.Contains(result, "pong") && !strings.Contains(result, "is local") {
            return backoff.Permanent(errTailscalePingFailed)
        }

        return nil
    })
}
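
// A possible shape for the "multiping" mentioned in the TODO above (not part of
// this change): fan the pings out in goroutines and collect the first error.
// The errgroup dependency (golang.org/x/sync/errgroup) is an assumption; a plain
// sync.WaitGroup with an error channel would work as well.
//
//    func (t *TailscaleInContainer) pingAllSketch(targets []string) error {
//        var eg errgroup.Group
//        for _, target := range targets {
//            target := target
//            eg.Go(func() error {
//                return t.Ping(target)
//            })
//        }
//
//        return eg.Wait()
//    }
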
func createTailscaleBuildOptions(version string) *dockertest.BuildOptions {
    var tailscaleBuildOptions *dockertest.BuildOptions
    switch version {
    case "head":
        tailscaleBuildOptions = &dockertest.BuildOptions{
            Dockerfile: "Dockerfile.tailscale-HEAD",
            ContextDir: dockerContextPath,
            BuildArgs:  []docker.BuildArg{},
        }
    case "unstable":
        tailscaleBuildOptions = &dockertest.BuildOptions{
            Dockerfile: "Dockerfile.tailscale",
            ContextDir: dockerContextPath,
            BuildArgs: []docker.BuildArg{
                {
                    Name:  "TAILSCALE_VERSION",
                    Value: "*", // Installs the latest version https://askubuntu.com/a/824926
                },
                {
                    Name:  "TAILSCALE_CHANNEL",
                    Value: "unstable",
                },
            },
        }
    default:
        tailscaleBuildOptions = &dockertest.BuildOptions{
            Dockerfile: "Dockerfile.tailscale",
            ContextDir: dockerContextPath,
            BuildArgs: []docker.BuildArg{
                {
                    Name:  "TAILSCALE_VERSION",
                    Value: version,
                },
                {
                    Name:  "TAILSCALE_CHANNEL",
                    Value: "stable",
                },
            },
        }
    }

    return tailscaleBuildOptions
}
File diff suppressed because it is too large
@@ -1,29 +1,42 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
//nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
|
||||
const (
|
||||
headscaleNetwork = "headscale-test"
|
||||
headscaleHostname = "headscale"
|
||||
DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
|
||||
IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
|
||||
errEnvVarEmpty = errors.New("getenv: environment variable empty")
|
||||
|
||||
IpPrefix4 = netip.MustParsePrefix("100.64.0.0/10")
|
||||
IpPrefix6 = netip.MustParsePrefix("fd7a:115c:a1e0::/48")
|
||||
|
||||
tailscaleVersions = []string{
|
||||
"head",
|
||||
"unstable",
|
||||
"1.24.0",
|
||||
"1.32.0",
|
||||
"1.30.2",
|
||||
"1.28.0",
|
||||
"1.26.2",
|
||||
"1.24.2",
|
||||
"1.22.2",
|
||||
"1.20.4",
|
||||
"1.18.2",
|
||||
@@ -56,7 +69,7 @@ func ExecuteCommand(
|
||||
cmd []string,
|
||||
env []string,
|
||||
options ...ExecuteCommandOption,
|
||||
) (string, error) {
|
||||
) (string, string, error) {
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
@@ -66,7 +79,7 @@ func ExecuteCommand(
|
||||
|
||||
for _, opt := range options {
|
||||
if err := opt(&execConfig); err != nil {
|
||||
return "", fmt.Errorf("execute-command/options: %w", err)
|
||||
return "", "", fmt.Errorf("execute-command/options: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,7 +108,7 @@ func ExecuteCommand(
|
||||
select {
|
||||
case res := <-resultChan:
|
||||
if res.err != nil {
|
||||
return "", res.err
|
||||
return stdout.String(), stderr.String(), res.err
|
||||
}
|
||||
|
||||
if res.exitCode != 0 {
|
||||
@@ -103,13 +116,19 @@ func ExecuteCommand(
|
||||
fmt.Println("stdout: ", stdout.String())
|
||||
fmt.Println("stderr: ", stderr.String())
|
||||
|
||||
return "", fmt.Errorf("command failed with: %s", stderr.String())
|
||||
return stdout.String(), stderr.String(), fmt.Errorf(
|
||||
"command failed with: %s",
|
||||
stderr.String(),
|
||||
)
|
||||
}
|
||||
|
||||
return stdout.String(), nil
|
||||
return stdout.String(), stderr.String(), nil
|
||||
case <-time.After(execConfig.timeout):
|
||||
|
||||
return "", fmt.Errorf("command timed out after %s", execConfig.timeout)
|
||||
return stdout.String(), stderr.String(), fmt.Errorf(
|
||||
"command timed out after %s",
|
||||
execConfig.timeout,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,12 +202,12 @@ func getDockerBuildOptions(version string) *dockertest.BuildOptions {
|
||||
|
||||
func getIPs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[string][]netaddr.IP, error) {
|
||||
ips := make(map[string][]netaddr.IP)
|
||||
) (map[string][]netip.Addr, error) {
|
||||
ips := make(map[string][]netip.Addr)
|
||||
for hostname, tailscale := range tailscales {
|
||||
command := []string{"tailscale", "ip"}
|
||||
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
@@ -202,7 +221,7 @@ func getIPs(
|
||||
if len(address) < 1 {
|
||||
continue
|
||||
}
|
||||
ip, err := netaddr.ParseIP(address)
|
||||
ip, err := netip.ParseAddr(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -212,3 +231,113 @@ func getIPs(
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
func getDNSNames(
|
||||
headscale *dockertest.Resource,
|
||||
) ([]string, error) {
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"nodes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var listAll []v1.Machine
|
||||
err = json.Unmarshal([]byte(listAllResult), &listAll)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostnames := make([]string, len(listAll))
|
||||
|
||||
for index := range listAll {
|
||||
hostnames[index] = listAll[index].GetGivenName()
|
||||
}
|
||||
|
||||
return hostnames, nil
|
||||
}
|
||||
|
||||
func getMagicFQDN(
|
||||
headscale *dockertest.Resource,
|
||||
) ([]string, error) {
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"nodes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var listAll []v1.Machine
|
||||
err = json.Unmarshal([]byte(listAllResult), &listAll)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostnames := make([]string, len(listAll))
|
||||
|
||||
for index := range listAll {
|
||||
hostnames[index] = fmt.Sprintf(
|
||||
"%s.%s.headscale.net",
|
||||
listAll[index].GetGivenName(),
|
||||
listAll[index].GetNamespace().GetName(),
|
||||
)
|
||||
}
|
||||
|
||||
return hostnames, nil
|
||||
}
|
||||
|
||||
func GetEnvStr(key string) (string, error) {
|
||||
v := os.Getenv(key)
|
||||
if v == "" {
|
||||
return v, errEnvVarEmpty
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func GetEnvBool(key string) (bool, error) {
|
||||
s, err := GetEnvStr(key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (dockertest.Network, error) {
    networks, err := pool.NetworksByName(name)
    if err != nil || len(networks) == 0 {
        if _, err := pool.CreateNetwork(name); err == nil {
            // Create does not give us an updated version of the resource, so we need to
            // get it again.
            networks, err := pool.NetworksByName(name)
            if err != nil {
                return dockertest.Network{}, err
            }

            return networks[0], nil
        }
    }

    return networks[0], nil
}
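
// Hypothetical usage sketch (not part of this change): the helper is meant to be
// idempotent, so repeated test suites can share a single docker network. The
// pool variable is an example; headscaleNetwork is the constant defined above.
//
//    network, err := GetFirstOrCreateNetwork(&pool, headscaleNetwork)
//    if err != nil {
//        log.Fatalf("could not get or create network: %s", err)
//    }
//    log.Printf("using docker network %s", network.Network.Name)
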
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build integration
|
||||
|
||||
//nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
@@ -8,7 +7,6 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -18,63 +16,76 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ccding/go-stun/stun"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/ccding/go-stun/stun"
|
||||
)
|
||||
|
||||
const (
|
||||
headscaleHostname = "headscale-derp"
|
||||
namespaceName = "derpnamespace"
|
||||
totalContainers = 3
|
||||
headscaleDerpHostname = "headscale-derp"
|
||||
namespaceName = "derpnamespace"
|
||||
totalContainers = 3
|
||||
)
|
||||
|
||||
type IntegrationDERPTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
networks map[int]dockertest.Network // so we keep the containers isolated
|
||||
headscale dockertest.Resource
|
||||
pool dockertest.Pool
|
||||
network dockertest.Network
|
||||
containerNetworks map[int]dockertest.Network // so we keep the containers isolated
|
||||
headscale dockertest.Resource
|
||||
saveLogs bool
|
||||
|
||||
tailscales map[string]dockertest.Resource
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestDERPIntegrationTestSuite(t *testing.T) {
|
||||
func TestIntegrationDERPTestSuite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration tests due to short flag")
|
||||
}
|
||||
|
||||
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
|
||||
if err != nil {
|
||||
saveLogs = false
|
||||
}
|
||||
|
||||
s := new(IntegrationDERPTestSuite)
|
||||
|
||||
s.tailscales = make(map[string]dockertest.Resource)
|
||||
s.networks = make(map[int]dockertest.Network)
|
||||
s.containerNetworks = make(map[int]dockertest.Network)
|
||||
s.saveLogs = saveLogs
|
||||
|
||||
suite.Run(t, s)
|
||||
|
||||
// HandleStats, which allows us to check if we passed and save logs
|
||||
// is called after TearDown, so we cannot tear down containers before
|
||||
// we have potentially saved the logs.
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
if s.saveLogs {
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !s.stats.Passed() {
|
||||
err := s.saveLog(&s.headscale, "test_output")
|
||||
if err != nil {
|
||||
log.Printf("Could not save log: %s\n", err)
|
||||
}
|
||||
}
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !s.stats.Passed() {
|
||||
err := s.saveLog(&s.headscale, "test_output")
|
||||
if err != nil {
|
||||
log.Printf("Could not save log: %s\n", err)
|
||||
}
|
||||
}
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
for _, network := range s.networks {
|
||||
if err := network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
for _, network := range s.containerNetworks {
|
||||
if err := network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -83,14 +94,20 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
if ppool, err := dockertest.NewPool(""); err == nil {
|
||||
s.pool = *ppool
|
||||
} else {
|
||||
log.Fatalf("Could not connect to docker: %s", err)
|
||||
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
|
||||
}
|
||||
|
||||
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
|
||||
}
|
||||
s.network = network
|
||||
|
||||
for i := 0; i < totalContainers; i++ {
|
||||
if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
|
||||
s.networks[i] = *pnetwork
|
||||
s.containerNetworks[i] = *pnetwork
|
||||
} else {
|
||||
log.Fatalf("Could not create network: %s", err)
|
||||
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,15 +118,19 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
|
||||
currentPath, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not determine current path: %s", err)
|
||||
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: headscaleHostname,
|
||||
Name: headscaleDerpHostname,
|
||||
Mounts: []string{
|
||||
fmt.Sprintf("%s/integration_test/etc_embedded_derp:/etc/headscale", currentPath),
|
||||
fmt.Sprintf(
|
||||
"%s/integration_test/etc_embedded_derp:/etc/headscale",
|
||||
currentPath,
|
||||
),
|
||||
},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
ExposedPorts: []string{"8443/tcp", "3478/udp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8443/tcp": {{HostPort: "8443"}},
|
||||
@@ -117,28 +138,41 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
},
|
||||
}
|
||||
|
||||
log.Println("Creating headscale container")
|
||||
err = s.pool.RemoveContainerByName(headscaleDerpHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not remove existing container before building test: %s",
|
||||
err,
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
log.Println("Creating headscale container for DERP integration tests")
|
||||
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
|
||||
s.headscale = *pheadscale
|
||||
} else {
|
||||
log.Fatalf("Could not start headscale container: %s", err)
|
||||
s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
|
||||
}
|
||||
log.Println("Created headscale container to test DERP")
|
||||
log.Println("Created headscale container for embedded DERP tests")
|
||||
|
||||
log.Println("Creating tailscale containers")
|
||||
log.Println("Creating tailscale containers for embedded DERP tests")
|
||||
|
||||
for i := 0; i < totalContainers; i++ {
|
||||
version := tailscaleVersions[i%len(tailscaleVersions)]
|
||||
hostname, container := s.tailscaleContainer(
|
||||
fmt.Sprint(i),
|
||||
version,
|
||||
s.networks[i],
|
||||
s.containerNetworks[i],
|
||||
)
|
||||
s.tailscales[hostname] = *container
|
||||
}
|
||||
|
||||
log.Println("Waiting for headscale to be ready")
|
||||
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8443/tcp"))
|
||||
log.Println("Waiting for headscale to be ready for embedded DERP tests")
|
||||
hostEndpoint := fmt.Sprintf("%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("8443/tcp"))
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("https://%s/health", hostEndpoint)
|
||||
@@ -147,6 +181,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
client := &http.Client{Transport: insecureTransport}
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
fmt.Printf("headscale for embedded DERP tests is not ready: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -162,10 +197,10 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
log.Println("headscale container is ready")
|
||||
log.Println("headscale container is ready for embedded DERP tests")
|
||||
|
||||
log.Printf("Creating headscale namespace: %s\n", namespaceName)
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "namespaces", "create", namespaceName},
|
||||
[]string{},
|
||||
@@ -174,7 +209,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Creating pre auth key for %s\n", namespaceName)
|
||||
preAuthResult, err := ExecuteCommand(
|
||||
preAuthResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -197,7 +232,10 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
assert.Nil(s.T(), err)
|
||||
assert.True(s.T(), preAuthKey.Reusable)
|
||||
|
||||
headscaleEndpoint := fmt.Sprintf("https://headscale:%s", s.headscale.GetPort("8443/tcp"))
|
||||
headscaleEndpoint := fmt.Sprintf(
|
||||
"https://headscale:%s",
|
||||
s.headscale.GetPort("8443/tcp"),
|
||||
)
|
||||
|
||||
log.Printf(
|
||||
"Joining tailscale containers to headscale at %s\n",
|
||||
@@ -234,7 +272,7 @@ func (s *IntegrationDERPTestSuite) Join(
|
||||
|
||||
log.Println("Join command:", command)
|
||||
log.Printf("Running join command for %s\n", hostname)
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
@@ -243,7 +281,9 @@ func (s *IntegrationDERPTestSuite) Join(
|
||||
log.Printf("%s joined\n", hostname)
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) tailscaleContainer(identifier, version string, network dockertest.Network,
|
||||
func (s *IntegrationDERPTestSuite) tailscaleContainer(
|
||||
identifier, version string,
|
||||
network dockertest.Network,
|
||||
) (string, *dockertest.Resource) {
|
||||
tailscaleBuildOptions := getDockerBuildOptions(version)
|
||||
|
||||
@@ -260,7 +300,10 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer(identifier, version string
|
||||
},
|
||||
|
||||
// expose the host IP address, so we can access it from inside the container
|
||||
ExtraHosts: []string{"host.docker.internal:host-gateway", "headscale:host-gateway"},
|
||||
ExtraHosts: []string{
|
||||
"host.docker.internal:host-gateway",
|
||||
"headscale:host-gateway",
|
||||
},
|
||||
}
|
||||
|
||||
pts, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
@@ -279,6 +322,23 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer(identifier, version string
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TearDownSuite() {
|
||||
if !s.saveLogs {
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
for _, network := range s.containerNetworks {
|
||||
if err := network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) HandleStats(
|
||||
@@ -320,7 +380,7 @@ func (s *IntegrationDERPTestSuite) saveLog(
|
||||
|
||||
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
|
||||
|
||||
err = ioutil.WriteFile(
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stdout.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
@@ -329,7 +389,7 @@ func (s *IntegrationDERPTestSuite) saveLog(
|
||||
return err
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stderr.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
@@ -342,11 +402,14 @@ func (s *IntegrationDERPTestSuite) saveLog(
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
|
||||
ips, err := getIPs(s.tailscales)
|
||||
hostnames, err := getDNSNames(&s.headscale)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Hostnames: %#v\n", hostnames)
|
||||
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
for peername := range ips {
|
||||
if peername == hostname {
|
||||
for _, peername := range hostnames {
|
||||
if strings.Contains(peername, hostname) {
|
||||
continue
|
||||
}
|
||||
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
|
||||
@@ -364,7 +427,7 @@ func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
|
||||
peername,
|
||||
)
|
||||
log.Println(command)
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
@@ -378,7 +441,9 @@ func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TestDERPSTUN() {
|
||||
headscaleSTUNAddr := fmt.Sprintf("localhost:%s", s.headscale.GetPort("3478/udp"))
|
||||
headscaleSTUNAddr := fmt.Sprintf("%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("3478/udp"))
|
||||
client := stun.NewClient()
|
||||
client.SetVerbose(true)
|
||||
client.SetVVerbose(true)
|
||||
|
||||
integration_oidc_test.go (new file, 558 lines)
@@ -0,0 +1,558 @@
|
||||
// nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
const (
|
||||
oidcHeadscaleHostname = "headscale-oidc"
|
||||
oidcMockHostname = "headscale-mock-oidc"
|
||||
oidcNamespaceName = "oidcnamespace"
|
||||
totalOidcContainers = 3
|
||||
)
|
||||
|
||||
type IntegrationOIDCTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
network dockertest.Network
|
||||
headscale dockertest.Resource
|
||||
mockOidc dockertest.Resource
|
||||
saveLogs bool
|
||||
|
||||
tailscales map[string]dockertest.Resource
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestIntegrationOIDCTestSuite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration tests due to short flag")
|
||||
}
|
||||
|
||||
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
|
||||
if err != nil {
|
||||
saveLogs = false
|
||||
}
|
||||
|
||||
s := new(IntegrationOIDCTestSuite)
|
||||
|
||||
s.tailscales = make(map[string]dockertest.Resource)
|
||||
s.saveLogs = saveLogs
|
||||
|
||||
suite.Run(t, s)
|
||||
|
||||
// HandleStats, which allows us to check if we passed and save logs
|
||||
// is called after TearDown, so we cannot tear down containers before
|
||||
// we have potentially saved the logs.
|
||||
if s.saveLogs {
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !s.stats.Passed() {
|
||||
err := s.saveLog(&s.headscale, "test_output")
|
||||
if err != nil {
|
||||
log.Printf("Could not save log: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.mockOidc); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
t.Logf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) SetupSuite() {
|
||||
if ppool, err := dockertest.NewPool(""); err == nil {
|
||||
s.pool = *ppool
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
|
||||
}
|
||||
|
||||
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
|
||||
}
|
||||
s.network = network
|
||||
|
||||
log.Printf("Network config: %v", s.network.Network.IPAM.Config[0])
|
||||
|
||||
s.Suite.T().Log("Setting up mock OIDC")
|
||||
mockOidcOptions := &dockertest.RunOptions{
|
||||
Name: oidcMockHostname,
|
||||
Cmd: []string{"headscale", "mockoidc"},
|
||||
ExposedPorts: []string{"10000/tcp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"10000/tcp": {{HostPort: "10000"}},
|
||||
},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Env: []string{
|
||||
fmt.Sprintf("MOCKOIDC_ADDR=%s", oidcMockHostname),
|
||||
"MOCKOIDC_PORT=10000",
|
||||
"MOCKOIDC_CLIENT_ID=superclient",
|
||||
"MOCKOIDC_CLIENT_SECRET=supersecret",
|
||||
},
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
ContextDir: ".",
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(oidcMockHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not remove existing container before building test: %s",
|
||||
err,
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
mockOidcOptions,
|
||||
DockerRestartPolicy); err == nil {
|
||||
s.mockOidc = *pmockoidc
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not start mockOIDC container: %s", err), "")
|
||||
}
|
||||
|
||||
s.Suite.T().Logf("Waiting for headscale mock oidc to be ready for tests")
|
||||
hostEndpoint := fmt.Sprintf(
|
||||
"%s:%s",
|
||||
s.mockOidc.GetIPInNetwork(&s.network),
|
||||
s.mockOidc.GetPort("10000/tcp"),
|
||||
)
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Printf("headscale mock OIDC tests is not ready: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("status code not OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
|
||||
// test setup, we need to abort and tear down. However, testify does not seem to
|
||||
// support that at the moment:
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
s.Suite.T().Log("headscale-mock-oidc container is ready for embedded OIDC tests")
|
||||
|
||||
oidcCfg := fmt.Sprintf(`
|
||||
oidc:
|
||||
issuer: http://%s:10000/oidc
|
||||
client_id: superclient
|
||||
client_secret: supersecret
|
||||
strip_email_domain: true`, s.mockOidc.GetIPInNetwork(&s.network))
|
||||
|
||||
currentPath, err := os.Getwd()
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
|
||||
}
|
||||
|
||||
baseConfig, err := os.ReadFile(
|
||||
path.Join(currentPath, "integration_test/etc_oidc/base_config.yaml"))
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not read base config: %s", err), "")
|
||||
}
|
||||
config := string(baseConfig) + oidcCfg
|
||||
|
||||
log.Println(config)
|
||||
|
||||
configPath := path.Join(currentPath, "integration_test/etc_oidc/config.yaml")
|
||||
err = os.WriteFile(configPath, []byte(config), 0o644)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not write config: %s", err), "")
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: oidcHeadscaleHostname,
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Mounts: []string{
|
||||
path.Join(currentPath,
|
||||
"integration_test/etc_oidc:/etc/headscale",
|
||||
),
|
||||
},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
ExposedPorts: []string{"8443/tcp", "3478/udp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8443/tcp": {{HostPort: "8443"}},
|
||||
"3478/udp": {{HostPort: "3478"}},
|
||||
},
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(oidcHeadscaleHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not remove existing container before building test: %s",
|
||||
err,
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
s.Suite.T().Logf("Creating headscale container for OIDC integration tests")
|
||||
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
|
||||
s.headscale = *pheadscale
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
|
||||
}
|
||||
s.Suite.T().Logf("Created headscale container for embedded OIDC tests")
|
||||
|
||||
s.Suite.T().Logf("Creating tailscale containers for embedded OIDC tests")
|
||||
|
||||
for i := 0; i < totalOidcContainers; i++ {
|
||||
version := tailscaleVersions[i%len(tailscaleVersions)]
|
||||
hostname, container := s.tailscaleContainer(
|
||||
fmt.Sprint(i),
|
||||
version,
|
||||
)
|
||||
s.tailscales[hostname] = *container
|
||||
}
|
||||
|
||||
s.Suite.T().Logf("Waiting for headscale to be ready for embedded OIDC tests")
|
||||
hostMockEndpoint := fmt.Sprintf(
|
||||
"%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("8443/tcp"),
|
||||
)
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("https://%s/health", hostMockEndpoint)
|
||||
insecureTransport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
client := &http.Client{Transport: insecureTransport}
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
log.Printf("headscale for embedded OIDC tests is not ready: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("status code not OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
|
||||
// test setup, we need to abort and tear down. However, testify does not seem to
|
||||
// support that at the moment:
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
s.Suite.T().Log("headscale container is ready for embedded OIDC tests")
|
||||
|
||||
s.Suite.T().Logf("Creating headscale namespace: %s\n", oidcNamespaceName)
|
||||
result, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "namespaces", "create", oidcNamespaceName},
|
||||
[]string{},
|
||||
)
|
||||
log.Println("headscale create namespace result: ", result)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
headscaleEndpoint := fmt.Sprintf(
|
||||
"https://headscale:%s",
|
||||
s.headscale.GetPort("8443/tcp"),
|
||||
)
|
||||
|
||||
log.Printf(
|
||||
"Joining tailscale containers to headscale at %s\n",
|
||||
headscaleEndpoint,
|
||||
)
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
s.joinWaitGroup.Add(1)
|
||||
go s.AuthenticateOIDC(headscaleEndpoint, hostname, tailscale)
|
||||
|
||||
// TODO(juan): Workaround for https://github.com/juanfont/headscale/issues/814
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
s.joinWaitGroup.Wait()
|
||||
|
||||
// The nodes need a bit of time to get their updated maps from headscale
|
||||
// TODO: See if we can have a more deterministic wait here.
|
||||
time.Sleep(60 * time.Second)
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) AuthenticateOIDC(
|
||||
endpoint, hostname string,
|
||||
tailscale dockertest.Resource,
|
||||
) {
|
||||
defer s.joinWaitGroup.Done()
|
||||
|
||||
loginURL, err := s.joinOIDC(endpoint, hostname, tailscale)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not join OIDC node: %s", err), "")
|
||||
}
|
||||
|
||||
insecureTransport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client := &http.Client{Transport: insecureTransport}
|
||||
resp, err := client.Get(loginURL.String())
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("auth body, err: %#v, %s", resp, err)
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not read login page: %s", err), "")
|
||||
}
|
||||
|
||||
log.Printf("Login page for %s: %s", hostname, string(body))
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) joinOIDC(
|
||||
endpoint, hostname string,
|
||||
tailscale dockertest.Resource,
|
||||
) (*url.URL, error) {
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"up",
|
||||
"-login-server",
|
||||
endpoint,
|
||||
"--hostname",
|
||||
hostname,
|
||||
}
|
||||
|
||||
log.Println("Join command:", command)
|
||||
log.Printf("Running join command for %s\n", hostname)
|
||||
_, stderr, _ := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
|
||||
// This piece of code just gets the login URL out of the stderr of the tailscale client.
|
||||
// See https://github.com/tailscale/tailscale/blob/main/cmd/tailscale/cli/up.go#L584.
|
||||
urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
|
||||
urlStr = strings.TrimSpace(urlStr)
|
||||
|
||||
// parse URL
|
||||
loginUrl, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Printf("Could not parse login URL: %s", err)
|
||||
log.Printf("Original join command result: %s", stderr)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return loginUrl, nil
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) tailscaleContainer(
|
||||
identifier, version string,
|
||||
) (string, *dockertest.Resource) {
|
||||
tailscaleBuildOptions := getDockerBuildOptions(version)
|
||||
|
||||
hostname := fmt.Sprintf(
|
||||
"tailscale-%s-%s",
|
||||
strings.Replace(version, ".", "-", -1),
|
||||
identifier,
|
||||
)
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Cmd: []string{
|
||||
"tailscaled", "--tun=tsdev",
|
||||
},
|
||||
|
||||
// expose the host IP address, so we can access it from inside the container
|
||||
ExtraHosts: []string{
|
||||
"host.docker.internal:host-gateway",
|
||||
"headscale:host-gateway",
|
||||
},
|
||||
}
|
||||
|
||||
pts, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
tailscaleBuildOptions,
|
||||
tailscaleOptions,
|
||||
DockerRestartPolicy,
|
||||
DockerAllowLocalIPv6,
|
||||
DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not start tailscale container version %s: %s",
|
||||
version,
|
||||
err,
|
||||
),
|
||||
)
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
return hostname, pts
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) TearDownSuite() {
|
||||
if !s.saveLogs {
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.mockOidc); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) HandleStats(
|
||||
suiteName string,
|
||||
stats *suite.SuiteInformation,
|
||||
) {
|
||||
s.stats = stats
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) saveLog(
|
||||
resource *dockertest.Resource,
|
||||
basePath string,
|
||||
) error {
|
||||
err := os.MkdirAll(basePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
err = s.pool.Client.Logs(
|
||||
docker.LogsOptions{
|
||||
Context: context.TODO(),
|
||||
Container: resource.Container.ID,
|
||||
OutputStream: &stdout,
|
||||
ErrorStream: &stderr,
|
||||
Tail: "all",
|
||||
RawTerminal: false,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
Follow: false,
|
||||
Timestamps: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
|
||||
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stdout.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stderr.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) TestPingAllPeersByAddress() {
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
ips, err := getIPs(s.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
for peername, peerIPs := range ips {
|
||||
for i, ip := range peerIPs {
|
||||
// We currently can't ping ourselves, so skip that.
|
||||
if peername == hostname {
|
||||
continue
|
||||
}
|
||||
s.T().
|
||||
Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
|
||||
// We are only interested in a "direct ping", which means we
// might need a couple more attempts before reaching the node.
|
||||
command := []string{
|
||||
"tailscale", "ping",
|
||||
"--timeout=1s",
|
||||
"--c=10",
|
||||
"--until-direct=true",
|
||||
ip.String(),
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"Pinging from %s to %s (%s)\n",
|
||||
hostname,
|
||||
peername,
|
||||
ip,
|
||||
)
|
||||
stdout, stderr, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf(
|
||||
"result for %s: stdout: %s, stderr: %s\n",
|
||||
hostname,
|
||||
stdout,
|
||||
stderr,
|
||||
)
|
||||
assert.Contains(t, stdout, "pong")
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,713 +0,0 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
)
|
||||
|
||||
type IntegrationTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
network dockertest.Network
|
||||
headscale dockertest.Resource
|
||||
|
||||
namespaces map[string]TestNamespace
|
||||
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestIntegrationTestSuite(t *testing.T) {
	s := new(IntegrationTestSuite)

	s.namespaces = map[string]TestNamespace{
		"thisspace": {
			count:      15,
			tailscales: make(map[string]dockertest.Resource),
		},
		"otherspace": {
			count:      5,
			tailscales: make(map[string]dockertest.Resource),
		},
	}

	suite.Run(t, s)

	// HandleStats, which allows us to check if we passed and save logs,
	// is called after TearDown, so we cannot tear down containers before
	// we have potentially saved the logs.
	for _, scales := range s.namespaces {
		for _, tailscale := range scales.tailscales {
			if err := s.pool.Purge(&tailscale); err != nil {
				log.Printf("Could not purge resource: %s\n", err)
			}
		}
	}

	if !s.stats.Passed() {
		err := s.saveLog(&s.headscale, "test_output")
		if err != nil {
			log.Printf("Could not save log: %s\n", err)
		}
	}
	if err := s.pool.Purge(&s.headscale); err != nil {
		log.Printf("Could not purge resource: %s\n", err)
	}

	if err := s.network.Close(); err != nil {
		log.Printf("Could not close network: %s\n", err)
	}
}

func (s *IntegrationTestSuite) saveLog(
	resource *dockertest.Resource,
	basePath string,
) error {
	err := os.MkdirAll(basePath, os.ModePerm)
	if err != nil {
		return err
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer

	err = s.pool.Client.Logs(
		docker.LogsOptions{
			Context:      context.TODO(),
			Container:    resource.Container.ID,
			OutputStream: &stdout,
			ErrorStream:  &stderr,
			Tail:         "all",
			RawTerminal:  false,
			Stdout:       true,
			Stderr:       true,
			Follow:       false,
			Timestamps:   false,
		},
	)
	if err != nil {
		return err
	}

	log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)

	err = ioutil.WriteFile(
		path.Join(basePath, resource.Container.Name+".stdout.log"),
		[]byte(stdout.String()),
		0o644,
	)
	if err != nil {
		return err
	}

	err = ioutil.WriteFile(
		path.Join(basePath, resource.Container.Name+".stderr.log"),
		[]byte(stderr.String()),
		0o644,
	)
	if err != nil {
		return err
	}

	return nil
}

func (s *IntegrationTestSuite) Join(
	endpoint, key, hostname string,
	tailscale dockertest.Resource,
) {
	defer s.joinWaitGroup.Done()

	command := []string{
		"tailscale",
		"up",
		"-login-server",
		endpoint,
		"--authkey",
		key,
		"--hostname",
		hostname,
	}

	log.Println("Join command:", command)
	log.Printf("Running join command for %s\n", hostname)
	_, err := ExecuteCommand(
		&tailscale,
		command,
		[]string{},
	)
	assert.Nil(s.T(), err)
	log.Printf("%s joined\n", hostname)
}

func (s *IntegrationTestSuite) tailscaleContainer(
	namespace, identifier, version string,
) (string, *dockertest.Resource) {
	tailscaleBuildOptions := getDockerBuildOptions(version)

	hostname := fmt.Sprintf(
		"%s-tailscale-%s-%s",
		namespace,
		strings.Replace(version, ".", "-", -1),
		identifier,
	)
	tailscaleOptions := &dockertest.RunOptions{
		Name:     hostname,
		Networks: []*dockertest.Network{&s.network},
		Cmd: []string{
			"tailscaled", "--tun=tsdev",
		},
	}

	pts, err := s.pool.BuildAndRunWithBuildOptions(
		tailscaleBuildOptions,
		tailscaleOptions,
		DockerRestartPolicy,
		DockerAllowLocalIPv6,
		DockerAllowNetworkAdministration,
	)
	if err != nil {
		log.Fatalf("Could not start tailscale container version %s: %s", version, err)
	}
	log.Printf("Created %s container\n", hostname)

	return hostname, pts
}

func (s *IntegrationTestSuite) SetupSuite() {
	var err error
	app = Headscale{
		dbType:   "sqlite3",
		dbString: "integration_test_db.sqlite3",
	}

	if ppool, err := dockertest.NewPool(""); err == nil {
		s.pool = *ppool
	} else {
		log.Fatalf("Could not connect to docker: %s", err)
	}

	if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
		s.network = *pnetwork
	} else {
		log.Fatalf("Could not create network: %s", err)
	}

	headscaleBuildOptions := &dockertest.BuildOptions{
		Dockerfile: "Dockerfile",
		ContextDir: ".",
	}

	currentPath, err := os.Getwd()
	if err != nil {
		log.Fatalf("Could not determine current path: %s", err)
	}

	headscaleOptions := &dockertest.RunOptions{
		Name: "headscale",
		Mounts: []string{
			fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
		},
		Networks: []*dockertest.Network{&s.network},
		Cmd:      []string{"headscale", "serve"},
	}

	log.Println("Creating headscale container")
	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
		s.headscale = *pheadscale
	} else {
		log.Fatalf("Could not start headscale container: %s", err)
	}
	log.Println("Created headscale container")

	log.Println("Creating tailscale containers")
	for namespace, scales := range s.namespaces {
		for i := 0; i < scales.count; i++ {
			version := tailscaleVersions[i%len(tailscaleVersions)]

			hostname, container := s.tailscaleContainer(
				namespace,
				fmt.Sprint(i),
				version,
			)
			scales.tailscales[hostname] = *container
		}
	}

	log.Println("Waiting for headscale to be ready")
	hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))

	if err := s.pool.Retry(func() error {
		url := fmt.Sprintf("http://%s/health", hostEndpoint)

		resp, err := http.Get(url)
		if err != nil {
			return err
		}

		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("status code not OK")
		}

		return nil
	}); err != nil {
		// TODO(kradalby): If we cannot access headscale, or any other fatal error during
		// test setup, we need to abort and tear down. However, testify does not seem to
		// support that at the moment:
		// https://github.com/stretchr/testify/issues/849
		return // fmt.Errorf("Could not connect to headscale: %s", err)
	}
	log.Println("headscale container is ready")

	for namespace, scales := range s.namespaces {
		log.Printf("Creating headscale namespace: %s\n", namespace)
		result, err := ExecuteCommand(
			&s.headscale,
			[]string{"headscale", "namespaces", "create", namespace},
			[]string{},
		)
		log.Println("headscale create namespace result: ", result)
		assert.Nil(s.T(), err)

		log.Printf("Creating pre auth key for %s\n", namespace)
		preAuthResult, err := ExecuteCommand(
			&s.headscale,
			[]string{
				"headscale",
				"--namespace",
				namespace,
				"preauthkeys",
				"create",
				"--reusable",
				"--expiration",
				"24h",
				"--output",
				"json",
			},
			[]string{"LOG_LEVEL=error"},
		)
		assert.Nil(s.T(), err)

		var preAuthKey v1.PreAuthKey
		err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
		assert.Nil(s.T(), err)
		assert.True(s.T(), preAuthKey.Reusable)

		headscaleEndpoint := "http://headscale:8080"

		log.Printf(
			"Joining tailscale containers to headscale at %s\n",
			headscaleEndpoint,
		)
		for hostname, tailscale := range scales.tailscales {
			s.joinWaitGroup.Add(1)
			go s.Join(headscaleEndpoint, preAuthKey.Key, hostname, tailscale)
		}

		s.joinWaitGroup.Wait()
	}

	// The nodes need a bit of time to get their updated maps from headscale.
	// TODO: See if we can have a more deterministic wait here.
	time.Sleep(60 * time.Second)
}

func (s *IntegrationTestSuite) TearDownSuite() {
}

func (s *IntegrationTestSuite) HandleStats(
	suiteName string,
	stats *suite.SuiteInformation,
) {
	s.stats = stats
}

func (s *IntegrationTestSuite) TestListNodes() {
	for namespace, scales := range s.namespaces {
		log.Println("Listing nodes")
		result, err := ExecuteCommand(
			&s.headscale,
			[]string{"headscale", "--namespace", namespace, "nodes", "list"},
			[]string{},
		)
		assert.Nil(s.T(), err)

		log.Printf("List nodes: \n%s\n", result)

		// Check that the correct number of hosts is present in the node list
		lines := strings.Split(result, "\n")
		assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)

		for hostname := range scales.tailscales {
			assert.Contains(s.T(), result, hostname)
		}
	}
}

func (s *IntegrationTestSuite) TestGetIpAddresses() {
	for _, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)

		for hostname := range scales.tailscales {
			ips := ips[hostname]
			for _, ip := range ips {
				s.T().Run(hostname, func(t *testing.T) {
					assert.NotNil(t, ip)

					log.Printf("IP for %s: %s\n", hostname, ip)

					// c.Assert(ip.Valid(), check.IsTrue)
					assert.True(t, ip.Is4() || ip.Is6())
					switch {
					case ip.Is4():
						assert.True(t, IpPrefix4.Contains(ip))
					case ip.Is6():
						assert.True(t, IpPrefix6.Contains(ip))
					}
				})
			}
		}
	}
}

// TODO(kradalby): fix this test
// We need some way to import ipnstate.Status from multiple go packages.
// Currently it will only work with 1.18.x since that is the last
// version we have in go.mod
// func (s *IntegrationTestSuite) TestStatus() {
//	for _, scales := range s.namespaces {
//		ips, err := getIPs(scales.tailscales)
//		assert.Nil(s.T(), err)
//
//		for hostname, tailscale := range scales.tailscales {
//			s.T().Run(hostname, func(t *testing.T) {
//				command := []string{"tailscale", "status", "--json"}
//
//				log.Printf("Getting status for %s\n", hostname)
//				result, err := ExecuteCommand(
//					&tailscale,
//					command,
//					[]string{},
//				)
//				assert.Nil(t, err)
//
//				var status ipnstate.Status
//				err = json.Unmarshal([]byte(result), &status)
//				assert.Nil(s.T(), err)
//
//				// TODO(kradalby): Replace this check with peer length of SAME namespace
//				// Check if we have as many nodes in status
//				// as we have IPs/tailscales
//				// lines := strings.Split(result, "\n")
//				// assert.Equal(t, len(ips), len(lines)-1)
//				// assert.Equal(t, len(scales.tailscales), len(lines)-1)
//
//				peerIps := getIPsfromIPNstate(status)
//
//				// Check that all hosts are present in each host's status
//				for ipHostname, ip := range ips {
//					if hostname != ipHostname {
//						assert.Contains(t, peerIps, ip)
//					}
//				}
//			})
//		}
//	}
// }

func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
	ips := make([]netaddr.IP, 0)

	for _, peer := range status.Peer {
		ips = append(ips, peer.TailscaleIPs...)
	}

	return ips
}

// TODO: Adapt this test for cross-communication between namespaces
func (s *IntegrationTestSuite) TestPingAllPeersByAddress() {
	for _, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)

		for hostname, tailscale := range scales.tailscales {
			for peername, peerIPs := range ips {
				for i, ip := range peerIPs {
					// We currently can't ping ourselves, so skip that.
					if peername == hostname {
						continue
					}
					s.T().
						Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
							// We are only interested in a "direct ping", which means we
							// might need a couple more attempts before reaching the node.
							command := []string{
								"tailscale", "ping",
								"--timeout=1s",
								"--c=10",
								"--until-direct=true",
								ip.String(),
							}

							log.Printf(
								"Pinging from %s to %s (%s)\n",
								hostname,
								peername,
								ip,
							)
							result, err := ExecuteCommand(
								&tailscale,
								command,
								[]string{},
							)
							assert.Nil(t, err)
							log.Printf("Result for %s: %s\n", hostname, result)
							assert.Contains(t, result, "pong")
						})
				}
			}
		}
	}
}

func (s *IntegrationTestSuite) TestTailDrop() {
	for _, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)

		retry := func(times int, sleepInterval time.Duration, doWork func() error) (err error) {
			for attempts := 0; attempts < times; attempts++ {
				err = doWork()
				if err == nil {
					return
				}
				time.Sleep(sleepInterval)
			}

			return
		}

		for hostname, tailscale := range scales.tailscales {
			command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", hostname)}
			_, err := ExecuteCommand(
				&tailscale,
				command,
				[]string{},
			)
			assert.Nil(s.T(), err)
			for peername := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"tailscale", "file", "cp",
						fmt.Sprintf("/tmp/file_from_%s", hostname),
						fmt.Sprintf("%s:", peername),
					}
					// Retry the file send a few times and assert on the error
					// from the last attempt.
					err := retry(10, 1*time.Second, func() error {
						log.Printf(
							"Sending file from %s to %s\n",
							hostname,
							peername,
						)
						_, err := ExecuteCommand(
							&tailscale,
							command,
							[]string{},
							ExecuteCommandTimeout(60*time.Second),
						)

						return err
					})
					assert.Nil(t, err)
				})
			}
		}

		for hostname, tailscale := range scales.tailscales {
			command := []string{
				"tailscale", "file",
				"get",
				"/tmp/",
			}
			_, err := ExecuteCommand(
				&tailscale,
				command,
				[]string{},
			)
			assert.Nil(s.T(), err)
			for peername, ip := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"ls",
						fmt.Sprintf("/tmp/file_from_%s", peername),
					}
					log.Printf(
						"Checking file in %s (%s) from %s (%s)\n",
						hostname,
						ips[hostname],
						peername,
						ip,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					log.Printf("Result for %s: %s\n", peername, result)
					assert.Equal(
						t,
						fmt.Sprintf("/tmp/file_from_%s\n", peername),
						result,
					)
				})
			}
		}
	}
}

func (s *IntegrationTestSuite) TestPingAllPeersByHostname() {
	for namespace, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)
		for hostname, tailscale := range scales.tailscales {
			for peername := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"tailscale", "ping",
						"--timeout=10s",
						"--c=20",
						"--until-direct=true",
						fmt.Sprintf("%s.%s.headscale.net", peername, namespace),
					}

					log.Printf(
						"Pinging using hostname from %s to %s\n",
						hostname,
						peername,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					log.Printf("Result for %s: %s\n", hostname, result)
					assert.Contains(t, result, "pong")
				})
			}
		}
	}
}

func (s *IntegrationTestSuite) TestMagicDNS() {
	for namespace, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)
		for hostname, tailscale := range scales.tailscales {
			for peername, ips := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"tailscale", "ip",
						fmt.Sprintf("%s.%s.headscale.net", peername, namespace),
					}

					log.Printf(
						"Resolving name %s from %s\n",
						peername,
						hostname,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					log.Printf("Result for %s: %s\n", hostname, result)

					for _, ip := range ips {
						assert.Contains(t, result, ip.String())
					}
				})
			}
		}
	}
}

||||
func getAPIURLs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[netaddr.IP]string, error) {
|
||||
fts := make(map[netaddr.IP]string)
|
||||
for _, tailscale := range tailscales {
|
||||
command := []string{
|
||||
"curl",
|
||||
"--unix-socket",
|
||||
"/run/tailscale/tailscaled.sock",
|
||||
"http://localhost/localapi/v0/file-targets",
|
||||
}
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pft []apitype.FileTarget
|
||||
if err := json.Unmarshal([]byte(result), &pft); err != nil {
|
||||
return nil, fmt.Errorf("invalid JSON: %w", err)
|
||||
}
|
||||
for _, ft := range pft {
|
||||
n := ft.Node
|
||||
for _, a := range n.Addresses { // just add all the addresses
|
||||
if _, ok := fts[a.IP()]; !ok {
|
||||
if ft.PeerAPIURL == "" {
|
||||
return nil, errors.New("api url is empty")
|
||||
}
|
||||
fts[a.IP()] = ft.PeerAPIURL
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fts, nil
|
||||
}
|
||||
54	integration_test/etc/alt-config.dump.gold.yaml	Normal file
@@ -0,0 +1,54 @@
acl_policy_path: ""
cli:
  insecure: false
  timeout: 5s
db_path: /tmp/integration_test_db.sqlite3
db_type: sqlite3
derp:
  auto_update_enabled: false
  server:
    enabled: false
  stun:
    enabled: true
  update_frequency: 1m
  urls:
  - https://controlplane.tailscale.com/derpmap/default
dns_config:
  override_local_dns: true
  base_domain: headscale.net
  domains: []
  magic_dns: true
  nameservers:
  - 127.0.0.11
  - 1.1.1.1
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:18080
log:
  level: disabled
  format: text
logtail:
  enabled: false
metrics_listen_addr: 127.0.0.1:19090
oidc:
  only_start_if_oidc_is_available: true
  scope:
  - openid
  - profile
  - email
  strip_email_domain: true
private_key_path: private.key
noise:
  private_key_path: noise_private.key
server_url: http://headscale:18080
tls_client_auth_mode: relaxed
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0o770"
randomize_client_port: false

Some files were not shown because too many files have changed in this diff.