Mirror of https://github.com/juanfont/headscale.git
Synced 2026-01-11 20:00:28 +01:00

Compare commits: 0.16.x...ko-fi-link (568 commits)
.github/FUNDING.yml (vendored, 5 changed lines)

@@ -1,2 +1,3 @@
ko_fi: kradalby
github: [kradalby]
# These are supported funding model platforms

ko_fi: headscale
.github/workflows/build.yml (vendored, 4 changed lines)

@@ -13,13 +13,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
.github/workflows/contributors.yml (vendored, 2 changed lines)

@@ -9,7 +9,7 @@ jobs:
add-contributors:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Delete upstream contributor branch
# Allow continue on failure to account for when the
# upstream branch is deleted or does not exist.
.github/workflows/gh-actions-updater.yaml (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
name: GitHub Actions Version Updater

# Controls when the action will run.
on:
schedule:
# Automatically run on every Sunday
- cron: "0 0 * * 0"

jobs:
build:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}

- name: Run GitHub Actions Version Updater
uses: saadmk11/github-actions-version-updater@v0.7.1
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}
.github/workflows/lint.yml (vendored, 8 changed lines)

@@ -1,5 +1,5 @@
---
name: CI
name: Lint

on: [push, pull_request]

@@ -7,13 +7,13 @@ jobs:
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: tj-actions/changed-files@v34
with:
files: |
*.nix

@@ -26,7 +26,7 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
uses: golangci/golangci-lint-action@v2
with:
version: v1.46.1
version: v1.49.0

# Only block PRs on new problems.
# If this is not enabled, we will end up having PRs
.github/workflows/release.yml (vendored, 108 changed lines)

@@ -1,5 +1,5 @@
---
name: release
name: Release

on:
push:

@@ -9,27 +9,17 @@ on:

jobs:
goreleaser:
runs-on: ubuntu-18.04 # due to CGO we need to user an older version
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18.0

- name: Install dependencies
run: |
sudo apt update
sudo apt install -y gcc-aarch64-linux-gnu
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
distribution: goreleaser
version: latest
args: release --rm-dist
- uses: cachix/install-nix-action@v16

- name: Run goreleaser
run: nix develop --command -- goreleaser release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -37,7 +27,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx

@@ -65,8 +55,8 @@ jobs:
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest
type=sha
type=raw,value=develop
- name: Login to DockerHub
uses: docker/login-action@v1
with:

@@ -100,7 +90,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx

@@ -125,13 +115,13 @@ jobs:
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
flavor: |
latest=false
suffix=-debug,onlatest=true
tags: |
type=semver,pattern={{version}}-debug
type=semver,pattern={{major}}.{{minor}}-debug
type=semver,pattern={{major}}-debug
type=raw,value=latest-debug
type=sha,suffix=-debug
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
type=raw,value=develop
- name: Login to DockerHub
uses: docker/login-action@v1
with:

@@ -161,69 +151,3 @@ jobs:
run: |
rm -rf /tmp/.buildx-cache-debug
mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug

docker-alpine-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set up QEMU for multiple platforms
uses: docker/setup-qemu-action@master
with:
platforms: arm64,amd64
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache-alpine
key: ${{ runner.os }}-buildx-alpine-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-alpine-
- name: Docker meta
id: meta-alpine
uses: docker/metadata-action@v3
with:
# list of Docker images to use as base name for tags
images: |
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
flavor: |
latest=false
tags: |
type=semver,pattern={{version}}-alpine
type=semver,pattern={{major}}.{{minor}}-alpine
type=semver,pattern={{major}}-alpine
type=raw,value=latest-alpine
type=sha,suffix=-alpine
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
push: true
context: .
file: Dockerfile.alpine
tags: ${{ steps.meta-alpine.outputs.tags }}
labels: ${{ steps.meta-alpine.outputs.labels }}
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache-alpine
cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
build-args: |
VERSION=${{ steps.meta-alpine.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache-alpine
mv /tmp/.buildx-cache-alpine-new /tmp/.buildx-cache-alpine
.github/workflows/renovatebot.yml (vendored, removed, 27 lines)

@@ -1,27 +0,0 @@
---
name: Renovate
on:
schedule:
- cron: "* * 5,20 * *" # Every 5th and 20th of the month
workflow_dispatch:
jobs:
renovate:
runs-on: ubuntu-latest
steps:
- name: Get token
id: get_token
uses: machine-learning-apps/actions-app-token@master
with:
APP_PEM: ${{ secrets.RENOVATEBOT_SECRET }}
APP_ID: ${{ secrets.RENOVATEBOT_APP_ID }}

- name: Checkout
uses: actions/checkout@v2.0.0

- name: Self-hosted Renovate
uses: renovatebot/github-action@v31.81.3
with:
configurationFile: .github/renovate.json
token: "x-access-token:${{ steps.get_token.outputs.app_token }}"
# env:
# LOG_LEVEL: "debug"
.github/workflows/test-integration-cli.yml (vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
name: Integration Test CLI

on: [pull_request]

jobs:
integration-test-cli:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_cli
.github/workflows/test-integration-derp.yml (vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
name: Integration Test DERP

on: [pull_request]

jobs:
integration-test-derp:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run Embedded DERP server integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_derp
.github/workflows/test-integration-v2-kradalby.yml (vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
name: Integration Test v2 - kradalby

on: [pull_request]

jobs:
integration-test-v2-kradalby:
runs-on: [self-hosted, linux, x64, nixos, docker]

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

# We currently only run one job at the time, so make sure
# we just wipe all containers and all networks.
- name: Clean up Docker
if: steps.changed-files.outputs.any_changed == 'true'
run: |
docker kill $(docker ps -q) || true
docker network prune -f || true

- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_v2_general
.github/workflows/test-integration.yml (vendored, removed, 58 lines)

@@ -1,58 +0,0 @@
name: CI

on: [pull_request]

jobs:
integration-test:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration_cli

- name: Run Embedded DERP server integration tests
if: steps.changed-files.outputs.any_changed == 'true'
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration_derp

- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration_general
.github/workflows/test.yml (vendored, 6 changed lines)

@@ -1,4 +1,4 @@
name: CI
name: Tests

on: [push, pull_request]

@@ -7,13 +7,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
@@ -1,6 +1,8 @@
---
run:
timeout: 10m
build-tags:
- ts2019

issues:
skip-dirs:

@@ -26,6 +28,7 @@ linters:
- ireturn
- execinquery
- exhaustruct
- nolintlint

# We should strive to enable these:
- wrapcheck

@@ -33,6 +36,9 @@ linters:
- makezero
- maintidx

# Limits the methods of an interface to 10. We have more in integration tests
- interfacebloat

# We might want to enable this, but it might be a lot of work
- cyclop
- nestif
@@ -1,7 +1,7 @@
---
before:
hooks:
- go mod tidy -compat=1.18
- go mod tidy -compat=1.19

release:
prerelease: auto

@@ -20,6 +20,8 @@ builds:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

- id: darwin-arm64
main: ./cmd/headscale/headscale.go

@@ -34,6 +36,8 @@ builds:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

- id: linux-amd64
mod_timestamp: "{{ .CommitTimestamp }}"

@@ -46,6 +50,8 @@ builds:
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

- id: linux-arm64
mod_timestamp: "{{ .CommitTimestamp }}"

@@ -58,6 +64,8 @@ builds:
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019

archives:
- id: golang-cross
CHANGELOG.md (59 changed lines)

@@ -1,6 +1,63 @@
# CHANGELOG

## 0.17.0 (2022-XX-XX)
## 0.18.x (2022-xx-xx)

### Changes

- Reworked routing and added support for subnet router failover [#1024](https://github.com/juanfont/headscale/pull/1024)
- Added an OIDC AllowGroups configuration option and authorization check [#1041](https://github.com/juanfont/headscale/pull/1041)
- Set `db_ssl` to false by default [#1052](https://github.com/juanfont/headscale/pull/1052)
- Fix duplicate nodes due to incorrect implementation of the protocol [#1058](https://github.com/juanfont/headscale/pull/1058)
- Report if a machine is online in CLI more accurately [#1062](https://github.com/juanfont/headscale/pull/1062)

## 0.17.1 (2022-12-05)

### Changes

- Correct typo on macOS standalone profile link [#1028](https://github.com/juanfont/headscale/pull/1028)
- Update platform docs with Fast User Switching [#1016](https://github.com/juanfont/headscale/pull/1016)

## 0.17.0 (2022-11-26)

### BREAKING

- `noise.private_key_path` has been added and is required for the new noise protocol.
- Log level option `log_level` was moved to a distinct `log` config section and renamed to `level` [#768](https://github.com/juanfont/headscale/pull/768)
- Removed Alpine Linux container image [#962](https://github.com/juanfont/headscale/pull/962)

### Important Changes

- Added support for Tailscale TS2021 protocol [#738](https://github.com/juanfont/headscale/pull/738)
- Add experimental support for [SSH ACL](https://tailscale.com/kb/1018/acls/#tailscale-ssh) (see docs for limitations) [#847](https://github.com/juanfont/headscale/pull/847)
  - Please note that this support should be considered _partially_ implemented
  - SSH ACLs status:
    - Support `accept` and `check` (SSH can be enabled and used for connecting and authentication)
    - Rejecting connections **is not supported**, meaning that if you enable SSH, assume that _all_ `ssh` connections **will be allowed**.
    - If you decide to try this feature, please carefully manage permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients.
    - We are currently improving our testing of the SSH ACLs; help us get an overview by testing and giving feedback.
  - This feature should be considered dangerous and is disabled by default. Enable by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`.

### Changes

- Add ability to specify config location via env var `HEADSCALE_CONFIG` [#674](https://github.com/juanfont/headscale/issues/674)
- Target Go 1.19 for Headscale [#778](https://github.com/juanfont/headscale/pull/778)
- Target Tailscale v1.30.0 to build Headscale [#780](https://github.com/juanfont/headscale/pull/780)
- Give a warning when running Headscale with a reverse proxy improperly configured for WebSockets [#788](https://github.com/juanfont/headscale/pull/788)
- Fix subnet routers with Primary Routes [#811](https://github.com/juanfont/headscale/pull/811)
- Added support for JSON logs [#653](https://github.com/juanfont/headscale/issues/653)
- Sanitise the node key passed to the registration URL [#823](https://github.com/juanfont/headscale/pull/823)
- Add support for generating pre-auth keys with tags [#767](https://github.com/juanfont/headscale/pull/767)
- Add support for evaluating `autoApprovers` ACL entries when a machine is registered [#763](https://github.com/juanfont/headscale/pull/763)
- Add config flag to allow Headscale to start if OIDC provider is down [#829](https://github.com/juanfont/headscale/pull/829)
- Fix prefix length comparison bug in AutoApprovers route evaluation [#862](https://github.com/juanfont/headscale/pull/862)
- Random node DNS suffix only applied if names collide in namespace [#766](https://github.com/juanfont/headscale/issues/766)
- Remove `ip_prefix` configuration option and warning [#899](https://github.com/juanfont/headscale/pull/899)
- Add `dns_config.override_local_dns` option [#905](https://github.com/juanfont/headscale/pull/905)
- Fix some DNS config issues [#660](https://github.com/juanfont/headscale/issues/660)
- Make it possible to disable TS2019 with build flag [#928](https://github.com/juanfont/headscale/pull/928)
- Fix OIDC registration issues [#960](https://github.com/juanfont/headscale/pull/960) and [#971](https://github.com/juanfont/headscale/pull/971)
- Add support for specifying NextDNS DNS-over-HTTPS resolver [#940](https://github.com/juanfont/headscale/pull/940)
- Make more sslmode available for postgresql connection [#927](https://github.com/juanfont/headscale/pull/927)

## 0.16.4 (2022-08-21)
@@ -1,5 +1,5 @@
# Builder image
FROM docker.io/golang:1.18.0-bullseye AS build
FROM docker.io/golang:1.19-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -9,7 +9,7 @@ RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale
@@ -1,24 +0,0 @@
# Builder image
FROM docker.io/golang:1.18.0-alpine AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN apk add gcc musl-dev
RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

# Production image
FROM docker.io/alpine:latest

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
CMD ["headscale"]
@@ -1,5 +1,5 @@
# Builder image
FROM docker.io/golang:1.18.0-bullseye AS build
FROM docker.io/golang:1.19-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -9,11 +9,11 @@ RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN test -e /go/bin/headscale

# Debug image
FROM gcr.io/distroless/base-debian11:debug
FROM docker.io/golang:1.19.0-bullseye

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC
@@ -4,14 +4,16 @@ ARG TAILSCALE_VERSION=*
ARG TAILSCALE_CHANNEL=stable

RUN apt-get update \
&& apt-get install -y gnupg curl \
&& apt-get install -y gnupg curl ssh \
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
&& apt-get update \
&& apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
&& rm -rf /var/lib/apt/lists/*

RUN adduser --shell=/bin/bash ssh-it-user

ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN chmod 644 /usr/local/share/ca-certificates/server.crt

RUN update-ca-certificates
@@ -1,13 +1,16 @@
FROM golang:latest

RUN apt-get update \
&& apt-get install -y ca-certificates dnsutils git iptables \
&& apt-get install -y ca-certificates dnsutils git iptables ssh \
&& rm -rf /var/lib/apt/lists/*

RUN useradd --shell=/bin/bash --create-home ssh-it-user

RUN git clone https://github.com/tailscale/tailscale.git

WORKDIR tailscale
WORKDIR /go/tailscale

RUN git checkout main

RUN sh build_dist.sh tailscale.com/cmd/tailscale
RUN sh build_dist.sh tailscale.com/cmd/tailscaled

@@ -16,6 +19,6 @@ RUN cp tailscale /usr/local/bin/
RUN cp tailscaled /usr/local/bin/

ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN chmod 644 /usr/local/share/ca-certificates/server.crt

RUN update-ca-certificates
Makefile (37 changed lines)

@@ -10,6 +10,8 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
else
endif

TAGS = -tags ts2019

# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)

@@ -17,23 +19,44 @@ PROTO_SOURCES = $(call rwildcard,,*.proto)

build:
GOOS=$(GOOS) CGO_ENABLED=0 go build -trimpath $(pieflags) -mod=readonly -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go
nix build

dev: lint test build

test:
@go test -coverprofile=coverage.out ./...
@go test $(TAGS) -short -coverprofile=coverage.out ./...

test_integration: test_integration_cli test_integration_derp test_integration_general
test_integration: test_integration_cli test_integration_derp test_integration_oidc test_integration_v2_general

test_integration_cli:
go test -failfast -tags integration_cli,integration -timeout 30m -count=1 ./...
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...

test_integration_derp:
go test -failfast -tags integration_derp,integration -timeout 30m -count=1 ./...
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...

test_integration_general:
go test -failfast -tags integration_general,integration -timeout 30m -count=1 ./...
test_integration_v2_general:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
-v $$PWD:$$PWD -w $$PWD/integration \
-v /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test $(TAGS) -failfast ./... -timeout 120m -parallel 8

coverprofile_func:
go tool cover -func=coverage.out
README.md (245 changed lines)
@@ -1,4 +1,4 @@
|
||||
# headscale
|
||||

|
||||
|
||||

|
||||
|
||||
@@ -195,6 +195,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Jiang Zhu</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/tsujamin>
|
||||
<img src=https://avatars.githubusercontent.com/u/2435619?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Benjamin Roberts/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Benjamin Roberts</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/reynico>
|
||||
<img src=https://avatars.githubusercontent.com/u/715768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Nico/>
|
||||
@@ -202,8 +211,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Nico</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/e-zk>
|
||||
<img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/>
|
||||
@@ -239,6 +246,8 @@ make build
|
||||
<sub style="font-size:14px"><b>ohdearaugustin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/mpldr>
|
||||
<img src=https://avatars.githubusercontent.com/u/33086936?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Moritz Poldrack/>
|
||||
@@ -246,8 +255,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Moritz Poldrack</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/GrigoriyMikhalkin>
|
||||
<img src=https://avatars.githubusercontent.com/u/3637857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=GrigoriyMikhalkin/>
|
||||
@@ -255,6 +262,20 @@ make build
|
||||
<sub style="font-size:14px"><b>GrigoriyMikhalkin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/mike-lloyd03>
|
||||
<img src=https://avatars.githubusercontent.com/u/49411532?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mike Lloyd/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Mike Lloyd</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/iSchluff>
|
||||
<img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Anton Schubert</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Niek>
|
||||
<img src=https://avatars.githubusercontent.com/u/213140?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Niek van der Maas/>
|
||||
@@ -269,11 +290,20 @@ make build
|
||||
<sub style="font-size:14px"><b>Eugen Biegler</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/iSchluff>
|
||||
<img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/>
|
||||
<a href=https://github.com/617a7a>
|
||||
<img src=https://avatars.githubusercontent.com/u/67651251?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azz/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Anton Schubert</b></sub>
|
||||
<sub style="font-size:14px"><b>Azz</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/evenh>
|
||||
<img src=https://avatars.githubusercontent.com/u/2701536?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Even Holthe/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Even Holthe</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -283,6 +313,20 @@ make build
|
||||
<sub style="font-size:14px"><b>Aaron Bieber</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/kazauwa>
|
||||
<img src=https://avatars.githubusercontent.com/u/12330159?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Igor Perepilitsyn/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Igor Perepilitsyn</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Aluxima>
|
||||
<img src=https://avatars.githubusercontent.com/u/16262531?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Laurent Marchaud/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Laurent Marchaud</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/fdelucchijr>
|
||||
<img src=https://avatars.githubusercontent.com/u/69133647?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Fernando De Lucchi/>
|
||||
@@ -293,10 +337,17 @@ make build
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/hdhoang>
|
||||
<img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Hoàng Đức Hiếu/>
|
||||
<a href=https://github.com/OrvilleQ>
|
||||
<img src=https://avatars.githubusercontent.com/u/21377465?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Orville Q. Song/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Hoàng Đức Hiếu</b></sub>
|
||||
<sub style="font-size:14px"><b>Orville Q. Song</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/hdhoang>
|
||||
<img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hdhoang/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>hdhoang</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -313,6 +364,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Deon Thomas</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/madjam002>
|
||||
<img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Jamie Greeff</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ChibangLW>
|
||||
<img src=https://avatars.githubusercontent.com/u/22293464?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ChibangLW/>
|
||||
@@ -320,6 +378,8 @@ make build
|
||||
<sub style="font-size:14px"><b>ChibangLW</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/mevansam>
|
||||
<img src=https://avatars.githubusercontent.com/u/403630?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mevan Samaratunga/>
|
||||
@@ -334,8 +394,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Michael G.</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ptman>
|
||||
<img src=https://avatars.githubusercontent.com/u/24669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Paul Tötterman/>
|
||||
@@ -357,6 +415,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Stefan Majer</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/kevin1sMe>
|
||||
<img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>kevinlin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/artemklevtsov>
|
||||
<img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
|
||||
@@ -371,6 +438,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Casey Marshall</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/CNLHC>
|
||||
<img src=https://avatars.githubusercontent.com/u/21005146?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=LiuHanCheng/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>LiuHanCheng</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/pvinis>
|
||||
<img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/>
|
||||
@@ -378,8 +452,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Pavlos Vinieratos</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/SilverBut>
|
||||
<img src=https://avatars.githubusercontent.com/u/6560655?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Silver Bullet/>
|
||||
@@ -387,6 +459,22 @@ make build
|
||||
<sub style="font-size:14px"><b>Silver Bullet</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/snh>
|
||||
<img src=https://avatars.githubusercontent.com/u/2051768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Steven Honson/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Steven Honson</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ratsclub>
|
||||
<img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Victor Freire</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/lachy2849>
|
||||
<img src=https://avatars.githubusercontent.com/u/98844035?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lachy2849/>
|
||||
@@ -408,6 +496,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Abraham Ingersoll</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/puzpuzpuz>
|
||||
<img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/apognu>
|
||||
<img src=https://avatars.githubusercontent.com/u/3017182?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antoine POPINEAU/>
|
||||
@@ -415,6 +510,8 @@ make build
|
||||
<sub style="font-size:14px"><b>Antoine POPINEAU</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/aofei>
|
||||
<img src=https://avatars.githubusercontent.com/u/5037285?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aofei Sheng/>
|
||||
@@ -422,8 +519,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Aofei Sheng</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/arnarg>
|
||||
<img src=https://avatars.githubusercontent.com/u/1291396?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arnar/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Arnar</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/awoimbee>
|
||||
<img src=https://avatars.githubusercontent.com/u/22431493?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arthur Woimbée/>
|
||||
@@ -452,6 +554,8 @@ make build
|
||||
<sub style="font-size:14px"><b>kundel</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/fkr>
|
||||
<img src=https://avatars.githubusercontent.com/u/51063?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Kronlage-Dammers/>
|
||||
@@ -466,8 +570,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Felix Yan</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/JJGadgets>
|
||||
<img src=https://avatars.githubusercontent.com/u/5709019?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JJGadgets/>
|
||||
@@ -475,13 +577,6 @@ make build
|
||||
<sub style="font-size:14px"><b>JJGadgets</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/madjam002>
|
||||
<img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Jamie Greeff</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/jimt>
|
||||
<img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
|
||||
@@ -489,6 +584,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Jim Tittsler</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ShadowJonathan>
|
||||
<img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Jonathan de Jong</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/piec>
|
||||
<img src=https://avatars.githubusercontent.com/u/781471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pierre Carru/>
|
||||
@@ -496,6 +598,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Pierre Carru</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Donran>
|
||||
<img src=https://avatars.githubusercontent.com/u/4838348?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pontus N/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Pontus N</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/nnsee>
|
||||
<img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/>
|
||||
@@ -510,13 +621,11 @@ make build
|
||||
<sub style="font-size:14px"><b>rcursaru</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/renovate-bot>
|
||||
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=WhiteSource Renovate/>
|
||||
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>WhiteSource Renovate</b></sub>
|
||||
<sub style="font-size:14px"><b>Mend Renovate</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -533,6 +642,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/stefanvanburen>
|
||||
<img src=https://avatars.githubusercontent.com/u/622527?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan VanBuren/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Stefan VanBuren</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/sophware>
|
||||
<img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/>
|
||||
@@ -554,8 +672,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Teteros</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/gitter-badger>
|
||||
<img src=https://avatars.githubusercontent.com/u/8518239?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=The Gitter Badger/>
|
||||
@@ -570,6 +686,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Tianon Gravi</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/thetillhoff>
|
||||
<img src=https://avatars.githubusercontent.com/u/25052289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Till Hoffmann/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Till Hoffmann</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/woudsma>
|
||||
<img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/>
|
||||
@@ -584,6 +709,20 @@ make build
|
||||
<sub style="font-size:14px"><b>Yang Bin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/gozssky>
|
||||
<img src=https://avatars.githubusercontent.com/u/17199941?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yujie Xia/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Yujie Xia</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/newellz2>
|
||||
<img src=https://avatars.githubusercontent.com/u/52436542?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zachary N./>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Zachary N.</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/zekker6>
|
||||
<img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/>
|
||||
@@ -591,6 +730,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Zakhar Bessarab</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/zhzy0077>
|
||||
<img src=https://avatars.githubusercontent.com/u/8717471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhiyuan Zheng/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Bpazy>
|
||||
<img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/>
|
||||
@@ -598,8 +746,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Ziyuan Han</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/derelm>
|
||||
<img src=https://avatars.githubusercontent.com/u/465155?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=derelm/>
|
||||
@@ -622,10 +768,26 @@ make build
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/lion24>
|
||||
<img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lion24/>
|
||||
<a href=https://github.com/magichuihui>
|
||||
<img src=https://avatars.githubusercontent.com/u/10866198?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=suhelen/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>lion24</b></sub>
|
||||
<sub style="font-size:14px"><b>suhelen</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/lion24>
|
||||
<img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sharkonet/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>sharkonet</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/manju-rn>
|
||||
<img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>manju-rn</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -635,6 +797,13 @@ make build
|
||||
<sub style="font-size:14px"><b>pernila</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/phpmalik>
|
||||
<img src=https://avatars.githubusercontent.com/u/26834645?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=phpmalik/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>phpmalik</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Wakeful-Cloud>
|
||||
<img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful-Cloud/>
|
||||
@@ -642,8 +811,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Wakeful-Cloud</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/xpzouying>
|
||||
<img src=https://avatars.githubusercontent.com/u/3946563?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=zy/>
|
||||
|
||||
acls.go (177 changed lines)
@@ -5,15 +5,17 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/tailscale/hujson"
|
||||
"gopkg.in/yaml.v3"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -54,6 +56,8 @@ const (
|
||||
ProtocolFC = 133 // Fibre Channel
|
||||
)
|
||||
|
||||
var featureEnableSSH = envknob.RegisterBool("HEADSCALE_EXPERIMENTAL_FEATURE_SSH")
|
||||
|
||||
// LoadACLPolicy loads the ACL policy from the specified path and generates the ACL rules.
|
||||
func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
log.Debug().
|
||||
@@ -113,36 +117,50 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
}
|
||||
|
||||
func (h *Headscale) UpdateACLRules() error {
|
||||
rules, err := h.generateACLRules()
|
||||
machines, err := h.ListMachines()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if h.aclPolicy == nil {
|
||||
return errEmptyPolicy
|
||||
}
|
||||
|
||||
rules, err := generateACLRules(machines, *h.aclPolicy, h.cfg.OIDC.StripEmaildomain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
|
||||
h.aclRules = rules
|
||||
|
||||
if featureEnableSSH() {
|
||||
sshRules, err := h.generateSSHRules()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Trace().Interface("SSH", sshRules).Msg("SSH rules generated")
|
||||
if h.sshPolicy == nil {
|
||||
h.sshPolicy = &tailcfg.SSHPolicy{}
|
||||
}
|
||||
h.sshPolicy.Rules = sshRules
|
||||
} else if h.aclPolicy != nil && len(h.aclPolicy.SSHs) > 0 {
|
||||
log.Info().Msg("SSH ACLs has been defined, but HEADSCALE_EXPERIMENTAL_FEATURE_SSH is not enabled, this is a unstable feature, check docs before activating")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
func generateACLRules(machines []Machine, aclPolicy ACLPolicy, stripEmaildomain bool) ([]tailcfg.FilterRule, error) {
|
||||
rules := []tailcfg.FilterRule{}
|
||||
|
||||
if h.aclPolicy == nil {
|
||||
return nil, errEmptyPolicy
|
||||
}
|
||||
|
||||
machines, err := h.ListMachines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for index, acl := range h.aclPolicy.ACLs {
|
||||
for index, acl := range aclPolicy.ACLs {
|
||||
if acl.Action != "accept" {
|
||||
return nil, errInvalidAction
|
||||
}
|
||||
|
||||
srcIPs := []string{}
|
||||
for innerIndex, src := range acl.Sources {
|
||||
srcs, err := h.generateACLPolicySrcIP(machines, *h.aclPolicy, src)
|
||||
srcs, err := generateACLPolicySrcIP(machines, aclPolicy, src, stripEmaildomain)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, Source %d", index, innerIndex)
|
||||
@@ -162,11 +180,12 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
|
||||
destPorts := []tailcfg.NetPortRange{}
|
||||
for innerIndex, dest := range acl.Destinations {
|
||||
dests, err := h.generateACLPolicyDest(
|
||||
dests, err := generateACLPolicyDest(
|
||||
machines,
|
||||
*h.aclPolicy,
|
||||
aclPolicy,
|
||||
dest,
|
||||
needsWildcard,
|
||||
stripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
@@ -187,19 +206,126 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicySrcIP(
|
||||
func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
|
||||
rules := []*tailcfg.SSHRule{}
|
||||
|
||||
if h.aclPolicy == nil {
|
||||
return nil, errEmptyPolicy
|
||||
}
|
||||
|
||||
machines, err := h.ListMachines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
acceptAction := tailcfg.SSHAction{
|
||||
Message: "",
|
||||
Reject: false,
|
||||
Accept: true,
|
||||
SessionDuration: 0,
|
||||
AllowAgentForwarding: false,
|
||||
HoldAndDelegate: "",
|
||||
AllowLocalPortForwarding: true,
|
||||
}
|
||||
|
||||
rejectAction := tailcfg.SSHAction{
|
||||
Message: "",
|
||||
Reject: true,
|
||||
Accept: false,
|
||||
SessionDuration: 0,
|
||||
AllowAgentForwarding: false,
|
||||
HoldAndDelegate: "",
|
||||
AllowLocalPortForwarding: false,
|
||||
}
|
||||
|
||||
for index, sshACL := range h.aclPolicy.SSHs {
|
||||
action := rejectAction
|
||||
switch sshACL.Action {
|
||||
case "accept":
|
||||
action = acceptAction
|
||||
case "check":
|
||||
checkAction, err := sshCheckAction(sshACL.CheckPeriod)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod)
|
||||
} else {
|
||||
action = *checkAction
|
||||
}
|
||||
default:
|
||||
log.Error().
|
||||
Msgf("Error parsing SSH %d, unknown action '%s'", index, sshACL.Action)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
|
||||
for innerIndex, rawSrc := range sshACL.Sources {
|
||||
expandedSrcs, err := expandAlias(
|
||||
machines,
|
||||
*h.aclPolicy,
|
||||
rawSrc,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing SSH %d, Source %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
for _, expandedSrc := range expandedSrcs {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: expandedSrc,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
userMap := make(map[string]string, len(sshACL.Users))
|
||||
for _, user := range sshACL.Users {
|
||||
userMap[user] = "="
|
||||
}
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
RuleExpires: nil,
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
|
||||
sessionLength, err := time.ParseDuration(duration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &tailcfg.SSHAction{
|
||||
Message: "",
|
||||
Reject: false,
|
||||
Accept: true,
|
||||
SessionDuration: sessionLength,
|
||||
AllowAgentForwarding: false,
|
||||
HoldAndDelegate: "",
|
||||
AllowLocalPortForwarding: true,
|
||||
}, nil
|
||||
}
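
Since sshCheckAction above simply wraps time.ParseDuration, the checkPeriod values accepted by a "check" SSH rule are plain Go duration strings. A minimal, self-contained sketch (not part of this change) showing which strings parse; "1d" is included as an example of a value Go rejects, in which case the rule falls back to the reject action as in generateSSHRules:

package main

import (
	"fmt"
	"time"
)

func main() {
	// "12h" and "30m" are valid Go durations; "1d" is not, so sshCheckAction
	// would return an error for it and the SSH ACL would keep rejectAction.
	for _, period := range []string{"12h", "30m", "1d"} {
		d, err := time.ParseDuration(period)
		fmt.Println(period, d, err)
	}
}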
|
||||
|
||||
func generateACLPolicySrcIP(
|
||||
machines []Machine,
|
||||
aclPolicy ACLPolicy,
|
||||
src string,
|
||||
stripEmaildomain bool,
|
||||
) ([]string, error) {
|
||||
return expandAlias(machines, aclPolicy, src, h.cfg.OIDC.StripEmaildomain)
|
||||
return expandAlias(machines, aclPolicy, src, stripEmaildomain)
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicyDest(
|
||||
func generateACLPolicyDest(
|
||||
machines []Machine,
|
||||
aclPolicy ACLPolicy,
|
||||
dest string,
|
||||
needsWildcard bool,
|
||||
stripEmaildomain bool,
|
||||
) ([]tailcfg.NetPortRange, error) {
|
||||
tokens := strings.Split(dest, ":")
|
||||
if len(tokens) < expectedTokenItems || len(tokens) > 3 {
|
||||
@@ -223,7 +349,7 @@ func (h *Headscale) generateACLPolicyDest(
|
||||
machines,
|
||||
aclPolicy,
|
||||
alias,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
stripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -260,12 +386,7 @@ func (h *Headscale) generateACLPolicyDest(
|
||||
func parseProtocol(protocol string) ([]int, bool, error) {
|
||||
switch protocol {
|
||||
case "":
|
||||
return []int{
|
||||
protocolICMP,
|
||||
protocolIPv6ICMP,
|
||||
protocolTCP,
|
||||
protocolUDP,
|
||||
}, false, nil
|
||||
return nil, false, nil
|
||||
case "igmp":
|
||||
return []int{protocolIGMP}, true, nil
|
||||
case "ipv4", "ip-in-ip":
|
||||
@@ -394,13 +515,13 @@ func expandAlias(
|
||||
}
|
||||
|
||||
// if alias is an IP
|
||||
ip, err := netaddr.ParseIP(alias)
|
||||
ip, err := netip.ParseAddr(alias)
|
||||
if err == nil {
|
||||
return []string{ip.String()}, nil
|
||||
}
|
||||
|
||||
// if alias is an CIDR
|
||||
cidr, err := netaddr.ParseIPPrefix(alias)
|
||||
cidr, err := netip.ParsePrefix(alias)
|
||||
if err == nil {
|
||||
return []string{cidr.String()}, nil
|
||||
}
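
The alias expansion above is part of this change's wider migration from inet.af/netaddr to the standard library's net/netip. A small, self-contained sketch (not part of the diff) of the one-to-one replacements used here:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// netaddr.ParseIP       -> netip.ParseAddr
	// netaddr.ParseIPPrefix -> netip.ParsePrefix
	ip, err := netip.ParseAddr("100.64.0.1")
	if err != nil {
		panic(err)
	}
	cidr, err := netip.ParsePrefix("192.168.1.0/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip.String(), cidr.String()) // 100.64.0.1 192.168.1.0/24
}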
|
||||
|
||||
acls_test.go (219 changed lines)
@@ -2,11 +2,12 @@ package headscale
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -53,7 +54,7 @@ func (s *Suite) TestBasicRule(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
}
|
||||
@@ -73,6 +74,81 @@ func (s *Suite) TestInvalidAction(c *check.C) {
|
||||
c.Assert(errors.Is(err, errInvalidAction), check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestSshRules(c *check.C) {
|
||||
envknob.Setenv("HEADSCALE_EXPERIMENTAL_FEATURE_SSH", "1")
|
||||
|
||||
namespace, err := app.CreateNamespace("user1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
hostInfo := tailcfg.Hostinfo{
|
||||
OS: "centos",
|
||||
Hostname: "testmachine",
|
||||
RequestTags: []string{"tag:test"},
|
||||
}
|
||||
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
NamespaceID: namespace.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
Groups: Groups{
|
||||
"group:test": []string{"user1"},
|
||||
},
|
||||
Hosts: Hosts{
|
||||
"client": netip.PrefixFrom(netip.MustParseAddr("100.64.99.42"), 32),
|
||||
},
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:test"},
|
||||
Destinations: []string{"client"},
|
||||
Users: []string{"autogroup:nonroot"},
|
||||
},
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"client"},
|
||||
Users: []string{"autogroup:nonroot"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = app.UpdateACLRules()
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(app.sshPolicy, check.NotNil)
|
||||
c.Assert(app.sshPolicy.Rules, check.HasLen, 2)
|
||||
c.Assert(app.sshPolicy.Rules[0].SSHUsers, check.HasLen, 1)
|
||||
c.Assert(app.sshPolicy.Rules[0].Principals, check.HasLen, 1)
|
||||
c.Assert(app.sshPolicy.Rules[0].Principals[0].NodeIP, check.Matches, "100.64.0.1")
|
||||
|
||||
c.Assert(app.sshPolicy.Rules[1].SSHUsers, check.HasLen, 1)
|
||||
c.Assert(app.sshPolicy.Rules[1].Principals, check.HasLen, 1)
|
||||
c.Assert(app.sshPolicy.Rules[1].Principals[0].NodeIP, check.Matches, "*")
|
||||
}
|
||||
|
||||
func (s *Suite) TestInvalidGroupInGroup(c *check.C) {
|
||||
// this ACL is wrong because the group in Sources sections doesn't exist
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
@@ -114,7 +190,7 @@ func (s *Suite) TestValidExpandTagOwnersInSources(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("user1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
@@ -131,7 +207,7 @@ func (s *Suite) TestValidExpandTagOwnersInSources(c *check.C) {
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
NamespaceID: namespace.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
@@ -164,7 +240,7 @@ func (s *Suite) TestValidExpandTagOwnersInDestinations(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("user1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
@@ -181,7 +257,7 @@ func (s *Suite) TestValidExpandTagOwnersInDestinations(c *check.C) {
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
NamespaceID: namespace.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
@@ -214,7 +290,7 @@ func (s *Suite) TestInvalidTagValidNamespace(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("user1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
@@ -231,7 +307,7 @@ func (s *Suite) TestInvalidTagValidNamespace(c *check.C) {
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
NamespaceID: namespace.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
@@ -263,7 +339,7 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("user1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "webserver")
|
||||
@@ -280,7 +356,7 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "webserver",
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
NamespaceID: namespace.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
@@ -299,7 +375,7 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
|
||||
NodeKey: "bar2",
|
||||
DiscoKey: "faab",
|
||||
Hostname: "user",
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.2")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.2")},
|
||||
NamespaceID: namespace.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
@@ -335,7 +411,7 @@ func (s *Suite) TestPortRange(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -349,7 +425,7 @@ func (s *Suite) TestProtocolParsing(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_protocols.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -363,7 +439,7 @@ func (s *Suite) TestPortWildcard(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -379,7 +455,7 @@ func (s *Suite) TestPortWildcardYAML(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.yaml")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -395,7 +471,7 @@ func (s *Suite) TestPortNamespace(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("testnamespace")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("testnamespace", "testmachine")
|
||||
@@ -419,7 +495,10 @@ func (s *Suite) TestPortNamespace(c *check.C) {
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
machines, err := app.ListMachines()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules(machines, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -437,7 +516,7 @@ func (s *Suite) TestPortGroup(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("testnamespace")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("testnamespace", "testmachine")
|
||||
@@ -459,7 +538,10 @@ func (s *Suite) TestPortGroup(c *check.C) {
|
||||
err = app.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := app.generateACLRules()
|
||||
machines, err := app.ListMachines()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules(machines, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -825,7 +907,6 @@ func Test_listMachinesInNamespace(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint
|
||||
func Test_expandAlias(t *testing.T) {
|
||||
type args struct {
|
||||
machines []Machine
|
||||
@@ -844,10 +925,10 @@ func Test_expandAlias(t *testing.T) {
|
||||
args: args{
|
||||
alias: "*",
|
||||
machines: []Machine{
|
||||
{IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.1")}},
|
||||
{IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")}},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.78.84.227"),
|
||||
netip.MustParseAddr("100.78.84.227"),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -864,25 +945,25 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "mickael"},
|
||||
},
|
||||
@@ -902,25 +983,25 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "mickael"},
|
||||
},
|
||||
@@ -951,7 +1032,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{},
|
||||
aclPolicy: ACLPolicy{
|
||||
Hosts: Hosts{
|
||||
"homeNetwork": netaddr.MustParseIPPrefix("192.168.1.0/24"),
|
||||
"homeNetwork": netip.MustParsePrefix("192.168.1.0/24"),
|
||||
},
|
||||
},
|
||||
stripEmailDomain: true,
|
||||
@@ -988,7 +1069,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -999,7 +1080,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1010,13 +1091,13 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1036,25 +1117,25 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "mickael"},
|
||||
},
|
||||
@@ -1077,27 +1158,27 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
ForcedTags: []string{"tag:hr-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
ForcedTags: []string{"tag:hr-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "mickael"},
|
||||
},
|
||||
@@ -1115,14 +1196,14 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
ForcedTags: []string{"tag:hr-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1133,13 +1214,13 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "mickael"},
|
||||
},
|
||||
@@ -1161,7 +1242,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
machines: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1172,7 +1253,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1183,13 +1264,13 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.3"),
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
Namespace: Namespace{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1245,7 +1326,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1256,7 +1337,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1267,7 +1348,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1277,7 +1358,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1296,7 +1377,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1307,7 +1388,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1318,7 +1399,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1328,7 +1409,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1342,7 +1423,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1353,14 +1434,14 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
ForcedTags: []string{"tag:accountant-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1370,7 +1451,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1384,7 +1465,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
nodes: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1395,7 +1476,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1406,7 +1487,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
@@ -1417,7 +1498,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
want: []Machine{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.1"),
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1428,7 +1509,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.2"),
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
HostInfo: HostInfo{
|
||||
@@ -1439,7 +1520,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
netaddr.MustParseIP("100.64.0.4"),
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
Namespace: Namespace{Name: "joe"},
|
||||
},
|
||||
|
||||
@@ -2,20 +2,22 @@ package headscale
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"gopkg.in/yaml.v3"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
// ACLPolicy represents a Tailscale ACL Policy.
|
||||
type ACLPolicy struct {
|
||||
Groups Groups `json:"groups" yaml:"groups"`
|
||||
Hosts Hosts `json:"hosts" yaml:"hosts"`
|
||||
TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"`
|
||||
ACLs []ACL `json:"acls" yaml:"acls"`
|
||||
Tests []ACLTest `json:"tests" yaml:"tests"`
|
||||
Groups Groups `json:"groups" yaml:"groups"`
|
||||
Hosts Hosts `json:"hosts" yaml:"hosts"`
|
||||
TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"`
|
||||
ACLs []ACL `json:"acls" yaml:"acls"`
|
||||
Tests []ACLTest `json:"tests" yaml:"tests"`
|
||||
AutoApprovers AutoApprovers `json:"autoApprovers" yaml:"autoApprovers"`
|
||||
SSHs []SSH `json:"ssh" yaml:"ssh"`
|
||||
}
|
||||
|
||||
// ACL is a basic rule for the ACL Policy.
|
||||
@@ -30,7 +32,7 @@ type ACL struct {
|
||||
type Groups map[string][]string
|
||||
|
||||
// Hosts are aliases for IP addresses or subnets.
|
||||
type Hosts map[string]netaddr.IPPrefix
|
||||
type Hosts map[string]netip.Prefix
|
||||
|
||||
// TagOwners specify what users (namespaces?) are allowed to use certain tags.
|
||||
type TagOwners map[string][]string
|
||||
@@ -42,7 +44,23 @@ type ACLTest struct {
|
||||
Deny []string `json:"deny,omitempty" yaml:"deny,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects.
|
||||
// AutoApprovers specify which users (namespaces?), groups or tags have their advertised routes
|
||||
// or exit node status automatically enabled.
|
||||
type AutoApprovers struct {
|
||||
Routes map[string][]string `json:"routes" yaml:"routes"`
|
||||
ExitNode []string `json:"exitNode" yaml:"exitNode"`
|
||||
}
|
||||
|
||||
// SSH controls who can ssh into which machines.
|
||||
type SSH struct {
|
||||
Action string `json:"action" yaml:"action"`
|
||||
Sources []string `json:"src" yaml:"src"`
|
||||
Destinations []string `json:"dst" yaml:"dst"`
|
||||
Users []string `json:"users" yaml:"users"`
|
||||
CheckPeriod string `json:"checkPeriod,omitempty" yaml:"checkPeriod,omitempty"`
|
||||
}
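
A minimal sketch (not part of this change) of how a policy's new "ssh" section maps onto the SSH struct above via its json tags. Only the tag names shown above are assumed ("action", "src", "dst", "users", "checkPeriod"); the group and tag values are made up for illustration, and real policies are HuJSON or YAML files carrying the rest of ACLPolicy as well:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the SSH struct above, reduced to its json tags for the sketch.
type SSH struct {
	Action       string   `json:"action"`
	Sources      []string `json:"src"`
	Destinations []string `json:"dst"`
	Users        []string `json:"users"`
	CheckPeriod  string   `json:"checkPeriod,omitempty"`
}

func main() {
	raw := `[{"action":"check","src":["group:admins"],"dst":["tag:prod"],"users":["root"],"checkPeriod":"12h"}]`

	var rules []SSH
	if err := json.Unmarshal([]byte(raw), &rules); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rules[0])
}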
|
||||
|
||||
// UnmarshalJSON allows parsing the Hosts directly into netip objects.
|
||||
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
@@ -60,7 +78,7 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
if !strings.Contains(prefixStr, "/") {
|
||||
prefixStr += "/32"
|
||||
}
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
prefix, err := netip.ParsePrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -71,7 +89,7 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
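
A self-contained sketch (not part of this change) of the bare-host handling shown above: a "hosts" entry without a "/" gets "/32" appended before netip.ParsePrefix is called, so a plain IPv4 address becomes a single-address prefix. The address used is a hypothetical example value:

package main

import (
	"fmt"
	"net/netip"
	"strings"
)

func main() {
	prefixStr := "100.64.99.42" // hypothetical "hosts" value with no prefix length
	if !strings.Contains(prefixStr, "/") {
		prefixStr += "/32"
	}
	prefix, err := netip.ParsePrefix(prefixStr)
	if err != nil {
		panic(err)
	}
	fmt.Println(prefix) // 100.64.99.42/32
}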
|
||||
|
||||
// UnmarshalYAML allows to parse the Hosts directly into netaddr objects.
|
||||
// UnmarshalYAML allows parsing the Hosts directly into netip objects.
|
||||
func (hosts *Hosts) UnmarshalYAML(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
@@ -81,7 +99,7 @@ func (hosts *Hosts) UnmarshalYAML(data []byte) error {
|
||||
return err
|
||||
}
|
||||
for host, prefixStr := range hostIPPrefixMap {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
prefix, err := netip.ParsePrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -100,3 +118,28 @@ func (policy ACLPolicy) IsZero() bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns the list of autoApproving namespaces, groups or tags for a given IPPrefix.
|
||||
func (autoApprovers *AutoApprovers) GetRouteApprovers(
|
||||
prefix netip.Prefix,
|
||||
) ([]string, error) {
|
||||
if prefix.Bits() == 0 {
|
||||
return autoApprovers.ExitNode, nil // 0.0.0.0/0, ::/0 or equivalent
|
||||
}
|
||||
|
||||
approverAliases := []string{}
|
||||
|
||||
for autoApprovedPrefix, autoApproverAliases := range autoApprovers.Routes {
|
||||
autoApprovedPrefix, err := netip.ParsePrefix(autoApprovedPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if prefix.Bits() >= autoApprovedPrefix.Bits() &&
|
||||
autoApprovedPrefix.Contains(prefix.Masked().Addr()) {
|
||||
approverAliases = append(approverAliases, autoApproverAliases...)
|
||||
}
|
||||
}
|
||||
|
||||
return approverAliases, nil
|
||||
}
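
A self-contained sketch (not part of this change) of the matching rule GetRouteApprovers implements: an advertised route is auto-approved when it is at least as specific as an approved prefix and its network address falls inside that prefix, while a zero-bit prefix is treated as an exit-node request and answered from the ExitNode list. The prefixes below are illustrative values only:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	approved := netip.MustParsePrefix("10.0.0.0/16")    // an autoApprovers.routes key
	advertised := netip.MustParsePrefix("10.0.42.0/24") // route announced by a node

	ok := advertised.Bits() >= approved.Bits() &&
		approved.Contains(advertised.Masked().Addr())
	fmt.Println(ok) // true: the /24 sits inside the approved /16

	exit := netip.MustParsePrefix("0.0.0.0/0")
	fmt.Println(exit.Bits() == 0) // true: handled via the ExitNode aliases
}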
|
||||
|
||||
api.go (816 changed lines)
@@ -2,21 +2,13 @@ package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
@@ -61,7 +53,7 @@ func (h *Headscale) HealthHandler(
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.pingDB(); err != nil {
|
||||
if err := h.pingDB(req.Context()); err != nil {
|
||||
respond(err)
|
||||
|
||||
return
|
||||
@@ -70,23 +62,6 @@ func (h *Headscale) HealthHandler(
|
||||
respond(nil)
|
||||
}
|
||||
|
||||
// KeyHandler provides the Headscale pub key
|
||||
// Listens in /key.
|
||||
func (h *Headscale) KeyHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err := writer.Write([]byte(MachinePublicKeyStripPrefix(h.privateKey.Public())))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
type registerWebAPITemplateConfig struct {
|
||||
Key string
|
||||
}
|
||||
@@ -119,7 +94,34 @@ func (h *Headscale) RegisterWebAPI(
|
||||
) {
|
||||
vars := mux.Vars(req)
|
||||
nodeKeyStr, ok := vars["nkey"]
|
||||
if !ok || nodeKeyStr == "" {
|
||||
|
||||
if !NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
|
||||
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// We need to make sure we don't open up for XSS-style injections: if the parameter that
|
||||
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
|
||||
// the template and log an error.
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(NodePublicKeyEnsurePrefix(nodeKeyStr)),
|
||||
)
|
||||
|
||||
if !ok || nodeKeyStr == "" || err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to parse incoming nodekey")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("Wrong params"))
|
||||
@@ -156,7 +158,7 @@ func (h *Headscale) RegisterWebAPI(
|
||||
|
||||
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err := writer.Write(content.Bytes())
|
||||
_, err = writer.Write(content.Bytes())
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
@@ -164,761 +166,3 @@ func (h *Headscale) RegisterWebAPI(
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
// RegistrationHandler handles the actual registration process of a machine
|
||||
// Endpoint /machine/:mkey.
|
||||
func (h *Headscale) RegistrationHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
vars := mux.Vars(req)
|
||||
machineKeyStr, ok := vars["mkey"]
|
||||
if !ok || machineKeyStr == "" {
|
||||
log.Error().
|
||||
Str("handler", "RegistrationHandler").
|
||||
Msg("No machine ID in request")
|
||||
http.Error(writer, "No machine ID in request", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
body, _ := io.ReadAll(req.Body)
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText([]byte(MachinePublicKeyEnsurePrefix(machineKeyStr)))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot parse machine key")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
http.Error(writer, "Cannot parse machine key", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
registerRequest := tailcfg.RegisterRequest{}
|
||||
err = decode(body, ®isterRequest, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot decode message")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
http.Error(writer, "Cannot decode message", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
machine, err := h.GetMachineByMachineKey(machineKey)
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
machineKeyStr := MachinePublicKeyStripPrefix(machineKey)
|
||||
|
||||
// If the machine has AuthKey set, handle registration via PreAuthKeys
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the node is waiting for interactive login.
|
||||
//
|
||||
// TODO(juan): We could use this field to improve our protocol implementation,
|
||||
// and hold the request until the client closes it, or the interactive
|
||||
// login is completed (i.e., the user registers the machine).
|
||||
// This is not implemented yet, as it is not strictly required. The only side-effect
|
||||
// is that the client will hammer headscale with requests until it gets a
|
||||
// successful RegisterResponse.
|
||||
if registerRequest.Followup != "" {
|
||||
if _, ok := h.registrationCache.Get(NodePublicKeyStripPrefix(registerRequest.NodeKey)); ok {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("follow_up", registerRequest.Followup).
|
||||
Msg("Machine is waiting for interactive login")
|
||||
|
||||
ticker := time.NewTicker(registrationHoldoff)
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
h.handleMachineRegistrationNew(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("follow_up", registerRequest.Followup).
|
||||
Msg("New machine not yet in the database")
|
||||
|
||||
givenName, err := h.GenerateGivenName(registerRequest.Hostinfo.Hostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
|
||||
Err(err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine did not have a key to authenticate, which means
|
||||
// that we rely on a method that calls back somehow (OpenID or CLI)
|
||||
// We create the machine and then keep it around until a callback
|
||||
// happens
|
||||
newMachine := Machine{
|
||||
MachineKey: machineKeyStr,
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
GivenName: givenName,
|
||||
NodeKey: NodePublicKeyStripPrefix(registerRequest.NodeKey),
|
||||
LastSeen: &now,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
|
||||
if !registerRequest.Expiry.IsZero() {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg("Non-zero expiry time requested")
|
||||
newMachine.Expiry = ®isterRequest.Expiry
|
||||
}
|
||||
|
||||
h.registrationCache.Set(
|
||||
newMachine.NodeKey,
|
||||
newMachine,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
h.handleMachineRegistrationNew(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine is already registered, so we need to pass through reauth or key update.
|
||||
if machine != nil {
|
||||
// If the NodeKey stored in headscale is the same as the key presented in a registration
|
||||
// request, then we have a node that is either:
|
||||
// - Trying to log out (sending an expiry in the past)
|
||||
// - A valid, registered machine, looking for the node map
|
||||
// - Expired machine wanting to reauthenticate
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(registerRequest.NodeKey) {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !registerRequest.Expiry.IsZero() &&
|
||||
registerRequest.Expiry.UTC().Before(now) {
|
||||
h.handleMachineLogOut(writer, req, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// If the machine is not expired and is registered, we have already accepted this machine,
|
||||
// let it proceed with a valid registration
|
||||
if !machine.isExpired() {
|
||||
h.handleMachineValidRegistration(writer, req, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(registerRequest.OldNodeKey) &&
|
||||
!machine.isExpired() {
|
||||
h.handleMachineRefreshKey(
|
||||
writer,
|
||||
req,
|
||||
machineKey,
|
||||
registerRequest,
|
||||
*machine,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine has expired
|
||||
h.handleMachineExpired(writer, req, machineKey, registerRequest, *machine)
|
||||
|
||||
machine.Expiry = &time.Time{}
|
||||
h.registrationCache.Set(
|
||||
NodePublicKeyStripPrefix(registerRequest.NodeKey),
|
||||
*machine,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapResponse(
|
||||
machineKey key.MachinePublic,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
) ([]byte, error) {
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot convert to node")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peers, err := h.getValidPeers(machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot fetch peers")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profiles := getMapResponseUserProfiles(*machine, peers)
|
||||
|
||||
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to convert peers to Tailscale nodes")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
h.cfg.DNSConfig,
|
||||
h.cfg.BaseDomain,
|
||||
*machine,
|
||||
peers,
|
||||
)
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: nodePeers,
|
||||
DNSConfig: dnsConfig,
|
||||
Domain: h.cfg.BaseDomain,
|
||||
PacketFilter: h.aclRules,
|
||||
DERPMap: h.DERPMap,
|
||||
UserProfiles: profiles,
|
||||
Debug: &tailcfg.Debug{
|
||||
DisableLogTail: !h.cfg.LogTail.Enabled,
|
||||
RandomizeClientPort: h.cfg.RandomizeClientPort,
|
||||
},
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
var respBody []byte
|
||||
if mapRequest.Compress == "zstd" {
|
||||
src, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to marshal response for the client")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
} else {
|
||||
respBody, err = encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// declare the incoming size on the first 4 bytes
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
data = append(data, respBody...)
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapKeepAliveResponse(
|
||||
machineKey key.MachinePublic,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
) ([]byte, error) {
|
||||
mapResponse := tailcfg.MapResponse{
|
||||
KeepAlive: true,
|
||||
}
|
||||
var respBody []byte
|
||||
var err error
|
||||
if mapRequest.Compress == "zstd" {
|
||||
src, err := json.Marshal(mapResponse)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapKeepAliveResponse").
|
||||
Err(err).
|
||||
Msg("Failed to marshal keepalive response for the client")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
} else {
|
||||
respBody, err = encode(mapResponse, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
data = append(data, respBody...)
|
||||
|
||||
return data, nil
|
||||
}
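Both map-response helpers above frame their payload with a 4-byte little-endian length header before the sealed body. Below is a minimal reader-side sketch, purely illustrative and not part of headscale; the helper name and signature are invented, and it assumes "encoding/binary" and "io" are imported.

func readLengthPrefixedFrame(r io.Reader) ([]byte, error) {
	// Read the 4-byte little-endian length header (reservedResponseHeaderSize).
	header := make([]byte, 4)
	if _, err := io.ReadFull(r, header); err != nil {
		return nil, err
	}
	length := binary.LittleEndian.Uint32(header)

	// Read the sealed (and possibly zstd-compressed) payload that follows.
	payload := make([]byte, length)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}

	return payload, nil
}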
|
||||
|
||||
func (h *Headscale) handleMachineLogOut(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Info().
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Client requested logout")
|
||||
|
||||
err := h.ExpireMachine(&machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleMachineLogOut").
|
||||
Err(err).
|
||||
Msg("Failed to expire machine")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = false
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineValidRegistration(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is valid, respond with redirect to /map
|
||||
log.Debug().
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
resp.Login = *machine.Namespace.toLogin()
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("update", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("update", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineExpired(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The client has registered before, but has expired
|
||||
log.Debug().
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Machine registration has expired. Sending a authurl to register")
|
||||
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
NodePublicKeyStripPrefix(registerRequest.NodeKey))
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
NodePublicKeyStripPrefix(registerRequest.NodeKey))
|
||||
}
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRefreshKey(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Debug().
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
|
||||
if err := h.db.Save(&machine).Error; err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to update machine key in the database")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRegistrationNew(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is new, redirect the client to the registration URL
|
||||
log.Debug().
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("The node seems to be new, sending auth url")
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf(
|
||||
"%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
NodePublicKeyStripPrefix(registerRequest.NodeKey),
|
||||
)
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
NodePublicKeyStripPrefix(registerRequest.NodeKey))
|
||||
}
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: check if any locks are needed around IP allocation.
|
||||
func (h *Headscale) handleAuthKey(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
) {
|
||||
machineKeyStr := MachinePublicKeyStripPrefix(machineKey)
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
pak, err := h.checkKeyValidity(registerRequest.Auth.AuthKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
resp.MachineAuthorized = false
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
|
||||
if pak != nil {
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
} else {
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", "unknown").Inc()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Authentication key was valid, proceeding to acquire IP addresses")
|
||||
|
||||
nodeKey := NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
|
||||
// Retrieve machine information if it exists.
|
||||
// The error is not important, because if it does not
|
||||
// exist, then this is a new machine and we will move
|
||||
// on to registration.
|
||||
machine, _ := h.GetMachineByMachineKey(machineKey)
|
||||
if machine != nil {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("machine already registered, refreshing with new auth key")
|
||||
|
||||
machine.NodeKey = nodeKey
|
||||
machine.AuthKeyID = uint(pak.ID)
|
||||
err := h.RefreshMachine(machine, registerRequest.Expiry)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("machine", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to refresh machine")
|
||||
|
||||
return
|
||||
}
|
||||
} else {
|
||||
now := time.Now().UTC()
|
||||
|
||||
givenName, err := h.GenerateGivenName(registerRequest.Hostinfo.Hostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
|
||||
Err(err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
machineToRegister := Machine{
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
GivenName: givenName,
|
||||
NamespaceID: pak.Namespace.ID,
|
||||
MachineKey: machineKeyStr,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
Expiry: ®isterRequest.Expiry,
|
||||
NodeKey: nodeKey,
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
|
||||
machine, err = h.RegisterMachine(
|
||||
machineToRegister,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("could not register machine")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = h.UsePreAuthKey(pak)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to use pre-auth key")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *pak.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "success", pak.Namespace.Name).
|
||||
Inc()
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Str("ips", strings.Join(machine.IPAddresses.ToStringSlice(), ", ")).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
}
|
||||
|
||||
api_common.go (new file)
@@ -0,0 +1,81 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func (h *Headscale) generateMapResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
log.Trace().
|
||||
Str("func", "generateMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := h.toNode(*machine, h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "generateMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot convert to node")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peers, err := h.getValidPeers(machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "generateMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot fetch peers")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profiles := h.getMapResponseUserProfiles(*machine, peers)
|
||||
|
||||
nodePeers, err := h.toNodes(peers, h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "generateMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to convert peers to Tailscale nodes")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
h.cfg.DNSConfig,
|
||||
h.cfg.BaseDomain,
|
||||
*machine,
|
||||
peers,
|
||||
)
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: nodePeers,
|
||||
DNSConfig: dnsConfig,
|
||||
Domain: h.cfg.BaseDomain,
|
||||
PacketFilter: h.aclRules,
|
||||
SSHPolicy: h.sshPolicy,
|
||||
DERPMap: h.DERPMap,
|
||||
UserProfiles: profiles,
|
||||
Debug: &tailcfg.Debug{
|
||||
DisableLogTail: !h.cfg.LogTail.Enabled,
|
||||
RandomizeClientPort: h.cfg.RandomizeClientPort,
|
||||
},
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("func", "generateMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
app.go
@@ -11,6 +11,7 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
@@ -18,13 +19,13 @@ import (
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/gorilla/mux"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/patrickmn/go-cache"
|
||||
zerolog "github.com/philip-bui/grpc-zerolog"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/puzpuzpuz/xsync"
|
||||
"github.com/puzpuzpuz/xsync/v2"
|
||||
zl "github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/crypto/acme"
|
||||
@@ -72,20 +73,22 @@ const (
|
||||
|
||||
// Headscale represents the base app of the service.
|
||||
type Headscale struct {
|
||||
cfg *Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
privateKey *key.MachinePrivate
|
||||
cfg *Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
privateKey *key.MachinePrivate
|
||||
noisePrivateKey *key.MachinePrivate
|
||||
|
||||
DERPMap *tailcfg.DERPMap
|
||||
DERPServer *DERPServer
|
||||
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules []tailcfg.FilterRule
|
||||
sshPolicy *tailcfg.SSHPolicy
|
||||
|
||||
lastStateChange *xsync.MapOf[time.Time]
|
||||
lastStateChange *xsync.MapOf[string, time.Time]
|
||||
|
||||
oidcProvider *oidc.Provider
|
||||
oauth2Config *oauth2.Config
|
||||
@@ -98,33 +101,22 @@ type Headscale struct {
|
||||
pollNetMapStreamWG sync.WaitGroup
|
||||
}
|
||||
|
||||
// Look up the TLS constant relative to user-supplied TLS client
|
||||
// authentication mode. If an unknown mode is supplied, the default
|
||||
// value, tls.RequireAnyClientCert, is returned. The returned boolean
|
||||
// indicates if the supplied mode was valid.
|
||||
func LookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
|
||||
switch mode {
|
||||
case DisabledClientAuth:
|
||||
// Client cert is _not_ required.
|
||||
return tls.NoClientCert, true
|
||||
case RelaxedClientAuth:
|
||||
// Client cert required, but _not verified_.
|
||||
return tls.RequireAnyClientCert, true
|
||||
case EnforcedClientAuth:
|
||||
// Client cert is _required and verified_.
|
||||
return tls.RequireAndVerifyClientCert, true
|
||||
default:
|
||||
// Return the default when an unknown value is supplied.
|
||||
return tls.RequireAnyClientCert, false
|
||||
}
|
||||
}
|
||||
|
||||
func NewHeadscale(cfg *Config) (*Headscale, error) {
|
||||
privKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create private key: %w", err)
|
||||
}
|
||||
|
||||
// TS2021 requires a different key from the legacy protocol.
|
||||
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
|
||||
}
|
||||
|
||||
if privateKey.Equal(*noisePrivateKey) {
|
||||
return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
|
||||
}
|
||||
|
||||
var dbString string
|
||||
switch cfg.DBtype {
|
||||
case Postgres:
|
||||
@@ -135,8 +127,12 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
|
||||
cfg.DBuser,
|
||||
)
|
||||
|
||||
if !cfg.DBssl {
|
||||
dbString += " sslmode=disable"
|
||||
if sslEnabled, err := strconv.ParseBool(cfg.DBssl); err == nil {
|
||||
if !sslEnabled {
|
||||
dbString += " sslmode=disable"
|
||||
}
|
||||
} else {
|
||||
dbString += fmt.Sprintf(" sslmode=%s", cfg.DBssl)
|
||||
}
|
||||
|
||||
if cfg.DBport != 0 {
|
||||
@@ -161,7 +157,8 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
privateKey: privateKey,
|
||||
noisePrivateKey: noisePrivateKey,
|
||||
aclRules: tailcfg.FilterAllowAll, // default allowall
|
||||
registrationCache: registrationCache,
|
||||
pollNetMapStreamWG: sync.WaitGroup{},
|
||||
@@ -175,7 +172,11 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
|
||||
if cfg.OIDC.Issuer != "" {
|
||||
err = app.initOIDC()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
|
||||
return nil, err
|
||||
} else {
|
||||
log.Warn().Err(err).Msg("failed to set up OIDC provider, falling back to CLI based authentication")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -216,6 +217,16 @@ func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
err := h.handlePrimarySubnetFailover()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to handle primary subnet failover")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
@@ -257,7 +268,7 @@ func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
}
|
||||
|
||||
if expiredFound {
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
h.setLastStateChangeToNow()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -425,19 +436,24 @@ func (h *Headscale) ensureUnixSocketIsAbsent() error {
|
||||
func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
|
||||
router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).Methods(http.MethodPost)
|
||||
router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
|
||||
h.addLegacyHandlers(router)
|
||||
|
||||
router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
|
||||
router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
|
||||
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
|
||||
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).Methods(http.MethodGet)
|
||||
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
|
||||
Methods(http.MethodGet)
|
||||
router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet)
|
||||
router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig).Methods(http.MethodGet)
|
||||
router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig).
|
||||
Methods(http.MethodGet)
|
||||
router.HandleFunc("/swagger", SwaggerUI).Methods(http.MethodGet)
|
||||
router.HandleFunc("/swagger/v1/openapiv2.json", SwaggerAPIv1).Methods(http.MethodGet)
|
||||
router.HandleFunc("/swagger/v1/openapiv2.json", SwaggerAPIv1).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
router.HandleFunc("/derp", h.DERPHandler)
|
||||
@@ -479,6 +495,8 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
go h.expireEphemeralNodes(updateInterval)
|
||||
|
||||
go h.failoverSubnetRoutes(updateInterval)
|
||||
|
||||
if zl.GlobalLevel() == zl.TraceLevel {
|
||||
zerolog.RespLog = true
|
||||
} else {
|
||||
@@ -572,7 +590,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(
|
||||
grpc_middleware.ChainUnaryServer(
|
||||
grpcMiddleware.ChainUnaryServer(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
zerolog.NewUnaryServerInterceptor(),
|
||||
),
|
||||
@@ -607,7 +625,8 @@ func (h *Headscale) Serve() error {
|
||||
//
|
||||
// HTTP setup
|
||||
//
|
||||
|
||||
// This is the regular router that we expose
|
||||
// over our main Addr. It also serves the legacy Tailscale API
|
||||
router := h.createRouter(grpcGatewayMux)
|
||||
|
||||
httpServer := &http.Server{
|
||||
@@ -784,10 +803,18 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
// Configuration via autocert with HTTP-01. This requires listening on
|
||||
// port 80 for the certificate validation in addition to the headscale
|
||||
// service, which can be configured to run on any other port.
|
||||
|
||||
server := &http.Server{
|
||||
Addr: h.cfg.TLS.LetsEncrypt.Listen,
|
||||
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
|
||||
ReadTimeout: HTTPReadTimeout,
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := server.ListenAndServe()
|
||||
log.Fatal().
|
||||
Caller().
|
||||
Err(http.ListenAndServe(h.cfg.TLS.LetsEncrypt.Listen, certManager.HTTPHandler(http.HandlerFunc(h.redirect)))).
|
||||
Err(err).
|
||||
Msg("failed to set up a HTTP server")
|
||||
}()
|
||||
|
||||
@@ -807,12 +834,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
|
||||
log.Info().Msg(fmt.Sprintf(
|
||||
"Client authentication (mTLS) is \"%s\". See the docs to learn about configuring this setting.",
|
||||
h.cfg.TLS.ClientAuthMode))
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
ClientAuth: h.cfg.TLS.ClientAuthMode,
|
||||
NextProtos: []string{"http/1.1"},
|
||||
Certificates: make([]tls.Certificate, 1),
|
||||
MinVersion: tls.VersionTLS12,
|
||||
@@ -824,38 +846,36 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) setLastStateChangeToNow(namespaces ...string) {
|
||||
func (h *Headscale) setLastStateChangeToNow() {
|
||||
var err error
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
if len(namespaces) == 0 {
|
||||
namespaces, err = h.ListNamespacesStr()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("failed to fetch all namespaces, failing to update last changed state.")
|
||||
}
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("failed to fetch all namespaces, failing to update last changed state.")
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
lastStateUpdate.WithLabelValues(namespace, "headscale").Set(float64(now.Unix()))
|
||||
lastStateUpdate.WithLabelValues(namespace.Name, "headscale").Set(float64(now.Unix()))
|
||||
if h.lastStateChange == nil {
|
||||
h.lastStateChange = xsync.NewMapOf[time.Time]()
|
||||
}
|
||||
h.lastStateChange.Store(namespace, now)
|
||||
h.lastStateChange.Store(namespace.Name, now)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) getLastStateChange(namespaces ...string) time.Time {
|
||||
func (h *Headscale) getLastStateChange(namespaces ...Namespace) time.Time {
|
||||
times := []time.Time{}
|
||||
|
||||
// getLastStateChange takes a list of namespaces as a "filter"; if no namespaces
// are passed, then use the entire list of namespaces and look for the last update.
|
||||
if len(namespaces) > 0 {
|
||||
for _, namespace := range namespaces {
|
||||
if lastChange, ok := h.lastStateChange.Load(namespace); ok {
|
||||
if lastChange, ok := h.lastStateChange.Load(namespace.Name); ok {
|
||||
times = append(times, lastChange)
|
||||
}
|
||||
}
|
||||
|
||||
app_test.go
@@ -1,11 +1,11 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
@@ -39,8 +39,8 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
c.Fatal(err)
|
||||
}
|
||||
cfg := Config{
|
||||
IPPrefixes: []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("10.27.0.0/23"),
|
||||
IPPrefixes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.27.0.0/23"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -59,20 +59,3 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
}
|
||||
app.db = db
|
||||
}
|
||||
|
||||
// Ensure an error is returned when an invalid auth mode
|
||||
// is supplied.
|
||||
func (s *Suite) TestInvalidClientAuthMode(c *check.C) {
|
||||
_, isValid := LookupTLSClientAuthMode("invalid")
|
||||
c.Assert(isValid, check.Equals, false)
|
||||
}
|
||||
|
||||
// Ensure that all client auth modes return a nil error.
|
||||
func (s *Suite) TestAuthModes(c *check.C) {
|
||||
modes := []string{"disabled", "relaxed", "enforced"}
|
||||
|
||||
for _, v := range modes {
|
||||
_, isValid := LookupTLSClientAuthMode(v)
|
||||
c.Assert(isValid, check.Equals, true)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -10,8 +11,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
keyLength = 64
|
||||
errPreAuthKeyTooShort = Error("key too short, must be 64 hexadecimal characters")
|
||||
errPreAuthKeyMalformed = Error("key is malformed. expected 64 hex characters with `nodekey` prefix")
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
@@ -87,8 +87,8 @@ var createNodeCmd = &cobra.Command{
|
||||
|
||||
return
|
||||
}
|
||||
if len(machineKey) != keyLength {
|
||||
err = errPreAuthKeyTooShort
|
||||
if !headscale.NodePublicKeyRegex.Match([]byte(machineKey)) {
|
||||
err = errPreAuthKeyMalformed
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", err),
|
||||
|
||||
cmd/headscale/cli/mockoidc.go (new file)
@@ -0,0 +1,104 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/oauth2-proxy/mockoidc"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
|
||||
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
|
||||
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
|
||||
accessTTL = 10 * time.Minute
|
||||
refreshTTL = 60 * time.Minute
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(mockOidcCmd)
|
||||
}
|
||||
|
||||
var mockOidcCmd = &cobra.Command{
|
||||
Use: "mockoidc",
|
||||
Short: "Runs a mock OIDC server for testing",
|
||||
Long: "This internal command runs a OpenID Connect for testing purposes",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := mockOIDC()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Error running mock OIDC server")
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func mockOIDC() error {
|
||||
clientID := os.Getenv("MOCKOIDC_CLIENT_ID")
|
||||
if clientID == "" {
|
||||
return errMockOidcClientIDNotDefined
|
||||
}
|
||||
clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
|
||||
if clientSecret == "" {
|
||||
return errMockOidcClientSecretNotDefined
|
||||
}
|
||||
addrStr := os.Getenv("MOCKOIDC_ADDR")
|
||||
if addrStr == "" {
|
||||
return errMockOidcPortNotDefined
|
||||
}
|
||||
portStr := os.Getenv("MOCKOIDC_PORT")
|
||||
if portStr == "" {
|
||||
return errMockOidcPortNotDefined
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mock, err := getMockOIDC(clientID, clientSecret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mock.Start(listener, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
|
||||
log.Info().Msgf("Issuer: %s", mock.Issuer())
|
||||
c := make(chan struct{})
|
||||
<-c
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getMockOIDC(clientID string, clientSecret string) (*mockoidc.MockOIDC, error) {
|
||||
keypair, err := mockoidc.NewKeypair(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mock := mockoidc.MockOIDC{
|
||||
ClientID: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
AccessTTL: accessTTL,
|
||||
RefreshTTL: refreshTTL,
|
||||
CodeChallengeMethodsSupported: []string{"plain", "S256"},
|
||||
Keypair: keypair,
|
||||
SessionStore: mockoidc.NewSessionStore(),
|
||||
UserQueue: &mockoidc.UserQueue{},
|
||||
ErrorQueue: &mockoidc.ErrorQueue{},
|
||||
}
|
||||
|
||||
return &mock, nil
|
||||
}
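A minimal sketch of how the mockoidc command above might be exercised, using only the environment variable names it reads; the concrete values are invented for illustration.

// Hypothetical values; mockOIDC() reads these variables and then blocks serving requests.
os.Setenv("MOCKOIDC_CLIENT_ID", "headscale")
os.Setenv("MOCKOIDC_CLIENT_SECRET", "supersecret")
os.Setenv("MOCKOIDC_ADDR", "127.0.0.1")
os.Setenv("MOCKOIDC_PORT", "9000")
if err := mockOIDC(); err != nil {
	log.Error().Err(err).Msg("mock OIDC server failed")
}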
|
||||
@@ -3,6 +3,7 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -13,7 +14,6 @@ import (
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
@@ -134,7 +134,9 @@ var registerNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine register", output)
|
||||
SuccessOutput(
|
||||
response.Machine,
|
||||
fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -467,6 +469,7 @@ func nodesToPtables(
|
||||
"ID",
|
||||
"Hostname",
|
||||
"Name",
|
||||
"MachineKey",
|
||||
"NodeKey",
|
||||
"Namespace",
|
||||
"IP addresses",
|
||||
@@ -502,8 +505,16 @@ func nodesToPtables(
|
||||
expiry = machine.Expiry.AsTime()
|
||||
}
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText(
|
||||
[]byte(headscale.MachinePublicKeyEnsurePrefix(machine.MachineKey)),
|
||||
)
|
||||
if err != nil {
|
||||
machineKey = key.MachinePublic{}
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
err = nodeKey.UnmarshalText(
|
||||
[]byte(headscale.NodePublicKeyEnsurePrefix(machine.NodeKey)),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -511,9 +522,7 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
var online string
|
||||
if lastSeen.After(
|
||||
time.Now().Add(-5 * time.Minute),
|
||||
) { // TODO: Find a better way to reliably show if online
|
||||
if machine.Online {
|
||||
online = pterm.LightGreen("online")
|
||||
} else {
|
||||
online = pterm.LightRed("offline")
|
||||
@@ -557,7 +566,7 @@ func nodesToPtables(
|
||||
var IPV4Address string
|
||||
var IPV6Address string
|
||||
for _, addr := range machine.IpAddresses {
|
||||
if netaddr.MustParseIP(addr).Is4() {
|
||||
if netip.MustParseAddr(addr).Is4() {
|
||||
IPV4Address = addr
|
||||
} else {
|
||||
IPV6Address = addr
|
||||
@@ -568,6 +577,7 @@ func nodesToPtables(
|
||||
strconv.FormatUint(machine.Id, headscale.Base10),
|
||||
machine.Name,
|
||||
machine.GetGivenName(),
|
||||
machineKey.ShortString(),
|
||||
nodeKey.ShortString(),
|
||||
namespace,
|
||||
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
|
||||
|
||||
@@ -3,6 +3,7 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
@@ -33,6 +34,8 @@ func init() {
|
||||
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().
|
||||
StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
|
||||
createPreAuthKeyCmd.Flags().
|
||||
StringSlice("tags", []string{}, "Tags to automatically assign to node")
|
||||
}
|
||||
|
||||
var preauthkeysCmd = &cobra.Command{
|
||||
@@ -81,7 +84,16 @@ var listPreAuthKeys = &cobra.Command{
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Key", "Reusable", "Ephemeral", "Used", "Expiration", "Created"},
|
||||
{
|
||||
"ID",
|
||||
"Key",
|
||||
"Reusable",
|
||||
"Ephemeral",
|
||||
"Used",
|
||||
"Expiration",
|
||||
"Created",
|
||||
"Tags",
|
||||
},
|
||||
}
|
||||
for _, key := range response.PreAuthKeys {
|
||||
expiration := "-"
|
||||
@@ -96,6 +108,14 @@ var listPreAuthKeys = &cobra.Command{
|
||||
reusable = fmt.Sprintf("%v", key.GetReusable())
|
||||
}
|
||||
|
||||
aclTags := ""
|
||||
|
||||
for _, tag := range key.AclTags {
|
||||
aclTags += "," + tag
|
||||
}
|
||||
|
||||
aclTags = strings.TrimLeft(aclTags, ",")
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
key.GetId(),
|
||||
key.GetKey(),
|
||||
@@ -104,6 +124,7 @@ var listPreAuthKeys = &cobra.Command{
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
aclTags,
|
||||
})
|
||||
|
||||
}
|
||||
@@ -136,6 +157,7 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
|
||||
reusable, _ := cmd.Flags().GetBool("reusable")
|
||||
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
|
||||
tags, _ := cmd.Flags().GetStringSlice("tags")
|
||||
|
||||
log.Trace().
|
||||
Bool("reusable", reusable).
|
||||
@@ -147,6 +169,7 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
Namespace: namespace,
|
||||
Reusable: reusable,
|
||||
Ephemeral: ephemeral,
|
||||
AclTags: tags,
|
||||
}
|
||||
|
||||
durationStr, _ := cmd.Flags().GetString("expiration")
|
||||
|
||||
@@ -15,6 +15,10 @@ import (
|
||||
var cfgFile string = ""
|
||||
|
||||
func init() {
|
||||
if len(os.Args) > 1 && (os.Args[1] == "version" || os.Args[1] == "mockoidc" || os.Args[1] == "completion") {
|
||||
return
|
||||
}
|
||||
|
||||
cobra.OnInitialize(initConfig)
|
||||
rootCmd.PersistentFlags().
|
||||
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
|
||||
@@ -25,15 +29,18 @@ func init() {
|
||||
}
|
||||
|
||||
func initConfig() {
|
||||
if cfgFile == "" {
|
||||
cfgFile = os.Getenv("HEADSCALE_CONFIG")
|
||||
}
|
||||
if cfgFile != "" {
|
||||
err := headscale.LoadConfig(cfgFile, true)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err)
|
||||
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
|
||||
}
|
||||
} else {
|
||||
err := headscale.LoadConfig("", false)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err)
|
||||
log.Fatal().Caller().Err(err).Msgf("Error loading config")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,7 +51,7 @@ func initConfig() {
|
||||
|
||||
machineOutput := HasMachineOutputFlag()
|
||||
|
||||
zerolog.SetGlobalLevel(cfg.LogLevel)
|
||||
zerolog.SetGlobalLevel(cfg.Log.Level)
|
||||
|
||||
// If the user has requested a "machine" readable format,
|
||||
// then disable login so the output remains valid.
|
||||
@@ -52,6 +59,10 @@ func initConfig() {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
if cfg.Log.Format == headscale.JSONLogFormat {
|
||||
log.Logger = log.Output(os.Stdout)
|
||||
}
|
||||
|
||||
if !cfg.DisableUpdateCheck && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
Version != "dev" {
|
||||
|
||||
@@ -11,29 +11,28 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
Base10 = 10
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(routesCmd)
|
||||
|
||||
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err := listRoutesCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
|
||||
enableRouteCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to enable")
|
||||
enableRouteCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
enableRouteCmd.Flags().BoolP("all", "a", false, "All routes from host")
|
||||
|
||||
err = enableRouteCmd.MarkFlagRequired("identifier")
|
||||
enableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err := enableRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
routesCmd.AddCommand(enableRouteCmd)
|
||||
|
||||
nodeCmd.AddCommand(routesCmd)
|
||||
disableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err = disableRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(disableRouteCmd)
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
@@ -44,7 +43,7 @@ var routesCmd = &cobra.Command{
|
||||
|
||||
var listRoutesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List routes advertised and enabled by a given node",
|
||||
Short: "List all routes",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
@@ -64,28 +63,51 @@ var listRoutesCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.GetMachineRouteRequest{
|
||||
MachineId: machineID,
|
||||
var routes []*v1.Route
|
||||
|
||||
if machineID == 0 {
|
||||
response, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
routes = response.Routes
|
||||
} else {
|
||||
response, err := client.GetMachineRoutes(ctx, &v1.GetMachineRoutesRequest{
|
||||
MachineId: machineID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get routes for machine %d: %s", machineID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
routes = response.Routes
|
||||
}
|
||||
|
||||
response, err := client.GetMachineRoute(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData := routesToPtables(response.Routes)
|
||||
tableData := routesToPtables(routes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
@@ -107,16 +129,12 @@ var listRoutesCmd = &cobra.Command{
|
||||
|
||||
var enableRouteCmd = &cobra.Command{
|
||||
Use: "enable",
|
||||
Short: "Set the enabled routes for a given node",
|
||||
Long: `This command will take a list of routes that will _replace_
|
||||
the current set of routes on a given node.
|
||||
If you would like to disable a route, simply run the command again, but
|
||||
omit the route you do not want to enable.
|
||||
`,
|
||||
Short: "Set a route as enabled",
|
||||
Long:    `This command will enable a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -131,52 +149,13 @@ omit the route you do not want to enable.
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
var routes []string
|
||||
|
||||
isAll, _ := cmd.Flags().GetBool("all")
|
||||
if isAll {
|
||||
response, err := client.GetMachineRoute(ctx, &v1.GetMachineRouteRequest{
|
||||
MachineId: machineID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot get machine routes: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
routes = response.GetRoutes().GetAdvertisedRoutes()
|
||||
} else {
|
||||
routes, err = cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
request := &v1.EnableMachineRoutesRequest{
|
||||
MachineId: machineID,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
response, err := client.EnableMachineRoutes(ctx, request)
|
||||
response, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -184,50 +163,71 @@ omit the route you do not want to enable.
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
tableData := routesToPtables(response.Routes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
var disableRouteCmd = &cobra.Command{
|
||||
Use: "disable",
|
||||
Short: "Set as disabled a given route",
|
||||
Long: `This command will make as disabled a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.DisableRoute(ctx, &v1.DisableRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// routesToPtables converts the list of routes to a nice table.
|
||||
func routesToPtables(routes *v1.Routes) pterm.TableData {
|
||||
tableData := pterm.TableData{{"Route", "Enabled"}}
|
||||
func routesToPtables(routes []*v1.Route) pterm.TableData {
|
||||
tableData := pterm.TableData{{"ID", "Machine", "Prefix", "Advertised", "Enabled", "Primary"}}
|
||||
|
||||
for _, route := range routes.GetAdvertisedRoutes() {
|
||||
enabled := isStringInSlice(routes.EnabledRoutes, route)
|
||||
|
||||
tableData = append(tableData, []string{route, strconv.FormatBool(enabled)})
|
||||
for _, route := range routes {
|
||||
tableData = append(tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(route.Id, Base10),
|
||||
route.Machine.GivenName,
|
||||
route.Prefix,
|
||||
strconv.FormatBool(route.Advertised),
|
||||
strconv.FormatBool(route.Enabled),
|
||||
strconv.FormatBool(route.IsPrimary),
|
||||
})
|
||||
}
|
||||
|
||||
return tableData
|
||||
}
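A hypothetical usage sketch for the new routesToPtables signature, with invented route values, rendered with pterm in the same way the list command does; the v1.Route field names are taken from the code above.

routes := []*v1.Route{
	{
		Id:         1,
		Machine:    &v1.Machine{GivenName: "router-1"},
		Prefix:     "10.0.0.0/24",
		Advertised: true,
		Enabled:    true,
		IsPrimary:  true,
	},
}
tableData := routesToPtables(routes)
// Render the table exactly as listRoutesCmd does.
if err := pterm.DefaultTable.WithHasHeader().WithData(tableData).Render(); err != nil {
	log.Fatalf(err.Error())
}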
|
||||
|
||||
func isStringInSlice(strs []string, s string) bool {
|
||||
for _, s2 := range strs {
|
||||
if s == s2 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
|
||||
const (
|
||||
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
|
||||
SocketWritePermissions = 0o666
|
||||
)
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
@@ -81,6 +82,19 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
|
||||
address = cfg.UnixSocket
|
||||
|
||||
// Try to give the user better feedback if we cannot write to the headscale
|
||||
// socket.
|
||||
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
Str("socket", cfg.UnixSocket).
|
||||
Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
|
||||
}
|
||||
}
|
||||
socket.Close()
|
||||
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
|
||||
@@ -55,10 +55,10 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
@@ -98,10 +98,10 @@ func (*Suite) TestConfigLoading(c *check.C) {
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
@@ -163,10 +163,12 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
c.Fatal(err)
|
||||
}
|
||||
// defer os.RemoveAll(tmpDir)
|
||||
|
||||
configYaml := []byte(
|
||||
"---\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: \"abc.pem\"",
|
||||
)
|
||||
configYaml := []byte(`---
|
||||
tls_letsencrypt_hostname: example.com
|
||||
tls_letsencrypt_challenge_type: ""
|
||||
tls_cert_path: abc.pem
|
||||
noise:
|
||||
private_key_path: noise_private.key`)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
|
||||
// Check configuration validation errors (1)
|
||||
@@ -191,9 +193,13 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
)
|
||||
|
||||
// Check configuration validation errors (2)
|
||||
configYaml = []byte(
|
||||
"---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"",
|
||||
)
|
||||
configYaml = []byte(`---
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://127.0.0.1:8080
|
||||
tls_letsencrypt_hostname: example.com
|
||||
tls_letsencrypt_challenge_type: TLS-ALPN-01
|
||||
`)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -14,7 +14,9 @@ server_url: http://127.0.0.1:8080
|
||||
|
||||
# Address to listen to / bind to on the server
|
||||
#
|
||||
listen_addr: 0.0.0.0:8080
|
||||
# For production:
|
||||
# listen_addr: 0.0.0.0:8080
|
||||
listen_addr: 127.0.0.1:8080
|
||||
|
||||
# Address to listen to /metrics, you may want
|
||||
# to keep this endpoint private to your internal
|
||||
@@ -27,7 +29,10 @@ metrics_listen_addr: 127.0.0.1:9090
|
||||
# remotely with the CLI
|
||||
# Note: Remote access _only_ works if you have
|
||||
# valid certificates.
|
||||
grpc_listen_addr: 0.0.0.0:50443
|
||||
#
|
||||
# For production:
|
||||
# grpc_listen_addr: 0.0.0.0:50443
|
||||
grpc_listen_addr: 127.0.0.1:50443
|
||||
|
||||
# Allow the gRPC admin interface to run in INSECURE
|
||||
# mode. This is not recommended as the traffic will
|
||||
@@ -35,11 +40,25 @@ grpc_listen_addr: 0.0.0.0:50443
|
||||
# are doing.
|
||||
grpc_allow_insecure: false
|
||||
|
||||
# Private key used encrypt the traffic between headscale
|
||||
# Private key used to encrypt the traffic between headscale
|
||||
# and Tailscale clients.
|
||||
# The private key file which will be
|
||||
# autogenerated if it's missing
|
||||
private_key_path: /var/lib/headscale/private.key
|
||||
# The private key file will be autogenerated if it's missing.
|
||||
#
|
||||
# For production:
|
||||
# /var/lib/headscale/private.key
|
||||
private_key_path: ./private.key
|
||||
|
||||
# The Noise section includes specific configuration for the
|
||||
# TS2021 Noise protocol
|
||||
noise:
|
||||
# The Noise private key is used to encrypt the
|
||||
# traffic between headscale and Tailscale clients when
|
||||
# using the new Noise-based protocol. It must be different
|
||||
# from the legacy private key.
|
||||
#
|
||||
# For production:
|
||||
# private_key_path: /var/lib/headscale/noise_private.key
|
||||
private_key_path: ./noise_private.key
|
||||
|
||||
# List of IP prefixes to allocate tailaddresses from.
|
||||
# Each prefix consists of either an IPv4 or IPv6 address,
|
||||
@@ -69,7 +88,7 @@ derp:
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
|
||||
# Listens in UDP at the configured address for STUN connections to help on NAT traversal.
|
||||
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
|
||||
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
|
||||
#
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
@@ -103,15 +122,18 @@ disable_check_updates: false
|
||||
# Time before an inactive ephemeral node is deleted?
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
|
||||
# Period to check for node updates in the tailnet. A value too low will severily affect
|
||||
# Period to check for node updates within the tailnet. A value too low will severely affect
|
||||
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
|
||||
# to the nodes, as they won't get updates or keep alive messages in time.
|
||||
# for the nodes, as they won't get updates or keep alive messages frequently enough.
|
||||
# In case of doubts, do not touch the default 10s.
|
||||
node_update_check_interval: 10s
|
||||
|
||||
# SQLite config
|
||||
db_type: sqlite3
|
||||
db_path: /var/lib/headscale/db.sqlite
|
||||
|
||||
# For production:
|
||||
# db_path: /var/lib/headscale/db.sqlite
|
||||
db_path: ./db.sqlite
|
||||
|
||||
# # Postgres config
|
||||
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
|
||||
@@ -121,6 +143,9 @@ db_path: /var/lib/headscale/db.sqlite
|
||||
# db_name: headscale
|
||||
# db_user: foo
|
||||
# db_pass: bar
|
||||
|
||||
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
|
||||
# in the 'db_ssl' field. Refer to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
|
||||
# db_ssl: false
|
||||
|
||||
### TLS configuration
|
||||
@@ -139,23 +164,18 @@ acme_email: ""
|
||||
# Domain name to request a TLS certificate for:
|
||||
tls_letsencrypt_hostname: ""
|
||||
|
||||
# Client (Tailscale/Browser) authentication mode (mTLS)
|
||||
# Acceptable values:
|
||||
# - disabled: client authentication disabled
|
||||
# - relaxed: client certificate is required but not verified
|
||||
# - enforced: client certificate is required and verified
|
||||
tls_client_auth_mode: relaxed
|
||||
|
||||
# Path to store certificates and metadata needed by
|
||||
# letsencrypt
|
||||
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
|
||||
# For production:
|
||||
# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
|
||||
tls_letsencrypt_cache_dir: ./cache
|
||||
|
||||
# Type of ACME challenge to use, currently supported types:
|
||||
# HTTP-01 or TLS-ALPN-01
|
||||
# See [docs/tls.md](docs/tls.md) for more information
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
# When HTTP-01 challenge is chosen, letsencrypt must set up a
|
||||
# verification endpoint, and it will be listning on:
|
||||
# verification endpoint, and it will be listening on:
|
||||
# :http = port 80
|
||||
tls_letsencrypt_listen: ":http"
|
||||
|
||||
@@ -163,7 +183,10 @@ tls_letsencrypt_listen: ":http"
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
log_level: info
|
||||
log:
|
||||
# Output formatting for logs: text or json
|
||||
format: text
|
||||
level: info
|
||||
|
||||
# Path to a file containing ACL policies.
|
||||
# ACLs can be defined as YAML or HUJSON.
|
||||
@@ -180,10 +203,25 @@ acl_policy_path: ""
|
||||
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
|
||||
#
|
||||
dns_config:
|
||||
# Whether to prefer using Headscale provided DNS or use local.
|
||||
override_local_dns: true
|
||||
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
|
||||
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
|
||||
# "abc123" is example NextDNS ID, replace with yours.
|
||||
#
|
||||
# With metadata sharing:
|
||||
# nameservers:
|
||||
# - https://dns.nextdns.io/abc123
|
||||
#
|
||||
# Without metadata sharing:
|
||||
# nameservers:
|
||||
# - 2a07:a8c0::ab:c123
|
||||
# - 2a07:a8c1::ab:c123
|
||||
|
||||
# Split DNS (see https://tailscale.com/kb/1054/dns/),
|
||||
# list of search domains and the DNS to query for each one.
|
||||
#
|
||||
@@ -208,9 +246,9 @@ dns_config:
|
||||
base_domain: example.com
|
||||
|
||||
# Unix socket used for the CLI to connect without authentication
|
||||
# Note: for local development, you probably want to change this to:
|
||||
# unix_socket: ./headscale.sock
|
||||
unix_socket: /var/run/headscale.sock
|
||||
# Note: for production you will want to set this to something like:
|
||||
# unix_socket: /var/run/headscale.sock
|
||||
unix_socket: ./headscale.sock
|
||||
unix_socket_permission: "0770"
|
||||
#
|
||||
# headscale supports experimental OpenID connect support,
|
||||
@@ -218,6 +256,7 @@ unix_socket_permission: "0770"
|
||||
# help us test it.
|
||||
# OpenID Connect
|
||||
# oidc:
|
||||
# only_start_if_oidc_is_available: true
|
||||
# issuer: "https://your-oidc.issuer.com/path"
|
||||
# client_id: "your-oidc-client-id"
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
@@ -234,6 +273,9 @@ unix_socket_permission: "0770"
|
||||
#
|
||||
# allowed_domains:
|
||||
# - example.com
|
||||
# Groups from keycloak have a leading '/'
|
||||
# allowed_groups:
|
||||
# - /headscale
|
||||
# allowed_users:
|
||||
# - alice@example.com
|
||||
#
|
||||
|
||||
217 config.go
@@ -1,10 +1,10 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"inet.af/netaddr"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
@@ -21,6 +21,9 @@ import (
|
||||
const (
|
||||
tlsALPN01ChallengeType = "TLS-ALPN-01"
|
||||
http01ChallengeType = "HTTP-01"
|
||||
|
||||
JSONLogFormat = "json"
|
||||
TextLogFormat = "text"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration.
|
||||
@@ -32,10 +35,11 @@ type Config struct {
|
||||
GRPCAllowInsecure bool
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
NodeUpdateCheckInterval time.Duration
|
||||
IPPrefixes []netaddr.IPPrefix
|
||||
IPPrefixes []netip.Prefix
|
||||
PrivateKeyPath string
|
||||
NoisePrivateKeyPath string
|
||||
BaseDomain string
|
||||
LogLevel zerolog.Level
|
||||
Log LogConfig
|
||||
DisableUpdateCheck bool
|
||||
|
||||
DERP DERPConfig
|
||||
@@ -47,7 +51,7 @@ type Config struct {
|
||||
DBname string
|
||||
DBuser string
|
||||
DBpass string
|
||||
DBssl bool
|
||||
DBssl string
|
||||
|
||||
TLS TLSConfig
|
||||
|
||||
@@ -70,9 +74,8 @@ type Config struct {
|
||||
}
|
||||
|
||||
type TLSConfig struct {
|
||||
CertPath string
|
||||
KeyPath string
|
||||
ClientAuthMode tls.ClientAuthType
|
||||
CertPath string
|
||||
KeyPath string
|
||||
|
||||
LetsEncrypt LetsEncryptConfig
|
||||
}
|
||||
@@ -85,14 +88,16 @@ type LetsEncryptConfig struct {
|
||||
}
|
||||
|
||||
type OIDCConfig struct {
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
Scope []string
|
||||
ExtraParams map[string]string
|
||||
AllowedDomains []string
|
||||
AllowedUsers []string
|
||||
StripEmaildomain bool
|
||||
OnlyStartIfOIDCIsAvailable bool
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
Scope []string
|
||||
ExtraParams map[string]string
|
||||
AllowedDomains []string
|
||||
AllowedUsers []string
|
||||
AllowedGroups []string
|
||||
StripEmaildomain bool
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
@@ -122,6 +127,11 @@ type ACLConfig struct {
|
||||
PolicyPath string
|
||||
}
|
||||
|
||||
type LogConfig struct {
|
||||
Format string
|
||||
Level zerolog.Level
|
||||
}
|
||||
|
||||
func LoadConfig(path string, isFile bool) error {
|
||||
if isFile {
|
||||
viper.SetConfigFile(path)
|
||||
@@ -143,11 +153,12 @@ func LoadConfig(path string, isFile bool) error {
|
||||
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", http01ChallengeType)
|
||||
viper.SetDefault("tls_client_auth_mode", "relaxed")
|
||||
|
||||
viper.SetDefault("log_level", "info")
|
||||
viper.SetDefault("log.level", "info")
|
||||
viper.SetDefault("log.format", TextLogFormat)
|
||||
|
||||
viper.SetDefault("dns_config", nil)
|
||||
viper.SetDefault("dns_config.override_local_dns", true)
|
||||
|
||||
viper.SetDefault("derp.server.enabled", false)
|
||||
viper.SetDefault("derp.server.stun.enabled", true)
|
||||
@@ -161,8 +172,11 @@ func LoadConfig(path string, isFile bool) error {
|
||||
viper.SetDefault("cli.timeout", "5s")
|
||||
viper.SetDefault("cli.insecure", false)
|
||||
|
||||
viper.SetDefault("db_ssl", false)
|
||||
|
||||
viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"})
|
||||
viper.SetDefault("oidc.strip_email_domain", true)
|
||||
viper.SetDefault("oidc.only_start_if_oidc_is_available", true)
|
||||
|
||||
viper.SetDefault("logtail.enabled", false)
|
||||
viper.SetDefault("randomize_client_port", false)
|
||||
@@ -171,6 +185,10 @@ func LoadConfig(path string, isFile bool) error {
|
||||
|
||||
viper.SetDefault("node_update_check_interval", "10s")
|
||||
|
||||
if IsCLIConfigured() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to read configuration from disk")
|
||||
|
||||
@@ -184,6 +202,10 @@ func LoadConfig(path string, isFile bool) error {
|
||||
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
|
||||
}
|
||||
|
||||
if !viper.IsSet("noise") || viper.GetString("noise.private_key_path") == "" {
|
||||
errorText += "Fatal config error: headscale now requires a new `noise.private_key_path` field in the config file for the Tailscale v2 protocol\n"
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") == tlsALPN01ChallengeType) &&
|
||||
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
@@ -202,19 +224,6 @@ func LoadConfig(path string, isFile bool) error {
|
||||
errorText += "Fatal config error: server_url must start with https:// or http://\n"
|
||||
}
|
||||
|
||||
_, authModeValid := LookupTLSClientAuthMode(
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
)
|
||||
|
||||
if !authModeValid {
|
||||
errorText += fmt.Sprintf(
|
||||
"Invalid tls_client_auth_mode supplied: %s. Accepted values: %s, %s, %s.",
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
DisabledClientAuth,
|
||||
RelaxedClientAuth,
|
||||
EnforcedClientAuth)
|
||||
}
|
||||
|
||||
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
@@ -244,10 +253,6 @@ func LoadConfig(path string, isFile bool) error {
|
||||
}
|
||||
|
||||
func GetTLSConfig() TLSConfig {
|
||||
tlsClientAuthMode, _ := LookupTLSClientAuthMode(
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
)
|
||||
|
||||
return TLSConfig{
|
||||
LetsEncrypt: LetsEncryptConfig{
|
||||
Hostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
@@ -263,7 +268,6 @@ func GetTLSConfig() TLSConfig {
|
||||
KeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_key_path"),
|
||||
),
|
||||
ClientAuthMode: tlsClientAuthMode,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -328,18 +332,59 @@ func GetACLConfig() ACLConfig {
|
||||
}
|
||||
}
|
||||
|
||||
func GetLogConfig() LogConfig {
|
||||
logLevelStr := viper.GetString("log.level")
|
||||
logLevel, err := zerolog.ParseLevel(logLevelStr)
|
||||
if err != nil {
|
||||
logLevel = zerolog.DebugLevel
|
||||
}
|
||||
|
||||
logFormatOpt := viper.GetString("log.format")
|
||||
var logFormat string
|
||||
switch logFormatOpt {
|
||||
case "json":
|
||||
logFormat = JSONLogFormat
|
||||
case "text":
|
||||
logFormat = TextLogFormat
|
||||
case "":
|
||||
logFormat = TextLogFormat
|
||||
default:
|
||||
log.Error().
|
||||
Str("func", "GetLogConfig").
|
||||
Msgf("Could not parse log format: %s. Valid choices are 'json' or 'text'", logFormatOpt)
|
||||
}
|
||||
|
||||
return LogConfig{
|
||||
Format: logFormat,
|
||||
Level: logLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
if viper.IsSet("dns_config") {
|
||||
dnsConfig := &tailcfg.DNSConfig{}
|
||||
|
||||
overrideLocalDNS := viper.GetBool("dns_config.override_local_dns")
|
||||
|
||||
if viper.IsSet("dns_config.nameservers") {
|
||||
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
|
||||
|
||||
nameservers := make([]netaddr.IP, len(nameserversStr))
|
||||
resolvers := make([]*dnstype.Resolver, len(nameserversStr))
|
||||
nameservers := []netip.Addr{}
|
||||
resolvers := []*dnstype.Resolver{}
|
||||
|
||||
for index, nameserverStr := range nameserversStr {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
for _, nameserverStr := range nameserversStr {
|
||||
// Search for explicit DNS-over-HTTPS resolvers
|
||||
if strings.HasPrefix(nameserverStr, "https://") {
|
||||
resolvers = append(resolvers, &dnstype.Resolver{
|
||||
Addr: nameserverStr,
|
||||
})
|
||||
|
||||
// This nameserver can not be parsed as an IP address
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse nameserver as a regular IP
|
||||
nameserver, err := netip.ParseAddr(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
@@ -347,14 +392,19 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
Msgf("Could not parse nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
|
||||
nameservers[index] = nameserver
|
||||
resolvers[index] = &dnstype.Resolver{
|
||||
nameservers = append(nameservers, nameserver)
|
||||
resolvers = append(resolvers, &dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
dnsConfig.Nameservers = nameservers
|
||||
dnsConfig.Resolvers = resolvers
|
||||
|
||||
if overrideLocalDNS {
|
||||
dnsConfig.Resolvers = resolvers
|
||||
} else {
|
||||
dnsConfig.FallbackResolvers = resolvers
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.restricted_nameservers") {
|
||||
@@ -369,7 +419,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
len(restrictedNameservers),
|
||||
)
|
||||
for index, nameserverStr := range restrictedNameservers {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
nameserver, err := netip.ParseAddr(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
@@ -389,17 +439,17 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.domains") {
|
||||
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
|
||||
domains := viper.GetStringSlice("dns_config.domains")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Domains = domains
|
||||
} else if domains != nil {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.domains is set, but no nameservers are configured. Ignoring domains.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.magic_dns") {
|
||||
magicDNS := viper.GetBool("dns_config.magic_dns")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Proxied = magicDNS
|
||||
} else if magicDNS {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
|
||||
}
|
||||
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
|
||||
}
|
||||
|
||||
var baseDomain string
|
||||
@@ -416,50 +466,39 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
}
|
||||
|
||||
func GetHeadscaleConfig() (*Config, error) {
|
||||
if IsCLIConfigured() {
|
||||
return &Config{
|
||||
CLI: CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
Timeout: viper.GetDuration("cli.timeout"),
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
dnsConfig, baseDomain := GetDNSConfig()
|
||||
derpConfig := GetDERPConfig()
|
||||
logConfig := GetLogTailConfig()
|
||||
randomizeClientPort := viper.GetBool("randomize_client_port")
|
||||
|
||||
configuredPrefixes := viper.GetStringSlice("ip_prefixes")
|
||||
parsedPrefixes := make([]netaddr.IPPrefix, 0, len(configuredPrefixes)+1)
|
||||
|
||||
logLevelStr := viper.GetString("log_level")
|
||||
logLevel, err := zerolog.ParseLevel(logLevelStr)
|
||||
if err != nil {
|
||||
logLevel = zerolog.DebugLevel
|
||||
}
|
||||
|
||||
legacyPrefixField := viper.GetString("ip_prefix")
|
||||
if len(legacyPrefixField) > 0 {
|
||||
log.
|
||||
Warn().
|
||||
Msgf(
|
||||
"%s, %s",
|
||||
"use of 'ip_prefix' for configuration is deprecated",
|
||||
"please see 'ip_prefixes' in the shipped example.",
|
||||
)
|
||||
legacyPrefix, err := netaddr.ParseIPPrefix(legacyPrefixField)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefix: %w", err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, legacyPrefix)
|
||||
}
|
||||
parsedPrefixes := make([]netip.Prefix, 0, len(configuredPrefixes)+1)
|
||||
|
||||
for i, prefixInConfig := range configuredPrefixes {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixInConfig)
|
||||
prefix, err := netip.ParsePrefix(prefixInConfig)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, prefix)
|
||||
}
|
||||
|
||||
prefixes := make([]netaddr.IPPrefix, 0, len(parsedPrefixes))
|
||||
prefixes := make([]netip.Prefix, 0, len(parsedPrefixes))
|
||||
{
|
||||
// dedup
|
||||
normalizedPrefixes := make(map[string]int, len(parsedPrefixes))
|
||||
for i, p := range parsedPrefixes {
|
||||
normalized, _ := p.Range().Prefix()
|
||||
normalized, _ := netipx.RangeOfPrefix(p).Prefix()
|
||||
normalizedPrefixes[normalized.String()] = i
|
||||
}
|
||||
|
||||
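As an aside, the following is a minimal standalone sketch (not part of this diff) of the normalization used in the dedup block above, assuming the go4.org/netipx package; it shows how a prefix written with host bits set collapses to its canonical form, which is what makes the string-keyed dedup work:

```go
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	// Both spellings cover the same address range, so they normalize
	// to the same canonical prefix and would dedup to a single entry.
	for _, s := range []string{"100.64.0.0/10", "100.64.1.0/10"} {
		p := netip.MustParsePrefix(s)
		normalized, _ := netipx.RangeOfPrefix(p).Prefix()
		fmt.Println(s, "->", normalized.String()) // both print 100.64.0.0/10
	}
}
```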
@@ -470,7 +509,7 @@ func GetHeadscaleConfig() (*Config, error) {
|
||||
}
|
||||
|
||||
if len(prefixes) < 1 {
|
||||
prefixes = append(prefixes, netaddr.MustParseIPPrefix("100.64.0.0/10"))
|
||||
prefixes = append(prefixes, netip.MustParsePrefix("100.64.0.0/10"))
|
||||
log.Warn().
|
||||
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
|
||||
}
|
||||
@@ -482,12 +521,14 @@ func GetHeadscaleConfig() (*Config, error) {
|
||||
GRPCAddr: viper.GetString("grpc_listen_addr"),
|
||||
GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"),
|
||||
DisableUpdateCheck: viper.GetBool("disable_check_updates"),
|
||||
LogLevel: logLevel,
|
||||
|
||||
IPPrefixes: prefixes,
|
||||
PrivateKeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("private_key_path"),
|
||||
),
|
||||
NoisePrivateKeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("noise.private_key_path"),
|
||||
),
|
||||
BaseDomain: baseDomain,
|
||||
|
||||
DERP: derpConfig,
|
||||
@@ -507,7 +548,7 @@ func GetHeadscaleConfig() (*Config, error) {
|
||||
DBname: viper.GetString("db_name"),
|
||||
DBuser: viper.GetString("db_user"),
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
DBssl: viper.GetBool("db_ssl"),
|
||||
DBssl: viper.GetString("db_ssl"),
|
||||
|
||||
TLS: GetTLSConfig(),
|
||||
|
||||
@@ -520,6 +561,9 @@ func GetHeadscaleConfig() (*Config, error) {
|
||||
UnixSocketPermission: GetFileMode("unix_socket_permission"),
|
||||
|
||||
OIDC: OIDCConfig{
|
||||
OnlyStartIfOIDCIsAvailable: viper.GetBool(
|
||||
"oidc.only_start_if_oidc_is_available",
|
||||
),
|
||||
Issuer: viper.GetString("oidc.issuer"),
|
||||
ClientID: viper.GetString("oidc.client_id"),
|
||||
ClientSecret: viper.GetString("oidc.client_secret"),
|
||||
@@ -527,12 +571,15 @@ func GetHeadscaleConfig() (*Config, error) {
|
||||
ExtraParams: viper.GetStringMapString("oidc.extra_params"),
|
||||
AllowedDomains: viper.GetStringSlice("oidc.allowed_domains"),
|
||||
AllowedUsers: viper.GetStringSlice("oidc.allowed_users"),
|
||||
AllowedGroups: viper.GetStringSlice("oidc.allowed_groups"),
|
||||
StripEmaildomain: viper.GetBool("oidc.strip_email_domain"),
|
||||
},
|
||||
|
||||
LogTail: logConfig,
|
||||
RandomizeClientPort: randomizeClientPort,
|
||||
|
||||
ACL: GetACLConfig(),
|
||||
|
||||
CLI: CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
@@ -540,6 +587,10 @@ func GetHeadscaleConfig() (*Config, error) {
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
},
|
||||
|
||||
ACL: GetACLConfig(),
|
||||
Log: GetLogConfig(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func IsCLIConfigured() bool {
|
||||
return viper.GetString("cli.address") != "" && viper.GetString("cli.api_key") != ""
|
||||
}
|
||||
|
||||
104 db.go
@@ -6,6 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
@@ -13,13 +14,14 @@ import (
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const (
|
||||
dbVersion = "1"
|
||||
errValueNotFound = Error("not found")
|
||||
dbVersion = "1"
|
||||
|
||||
errValueNotFound = Error("not found")
|
||||
ErrCannotParsePrefix = Error("cannot parse prefix")
|
||||
)
|
||||
|
||||
// KV is a key-value store in a psql table. For future use...
|
||||
@@ -79,6 +81,67 @@ func (h *Headscale) initDB() error {
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Route{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "enabled_routes") {
|
||||
log.Info().Msgf("Database has legacy enabled_routes column in machine, migrating...")
|
||||
|
||||
type MachineAux struct {
|
||||
ID uint64
|
||||
EnabledRoutes IPPrefixes
|
||||
}
|
||||
|
||||
machinesAux := []MachineAux{}
|
||||
err := db.Table("machines").Select("id, enabled_routes").Scan(&machinesAux).Error
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
for _, machine := range machinesAux {
|
||||
for _, prefix := range machine.EnabledRoutes {
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("enabled_route", prefix.String()).
|
||||
Msg("Error parsing enabled_route")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
err = db.Preload("Machine").Where("machine_id = ? AND prefix = ?", machine.ID, IPPrefix(prefix)).First(&Route{}).Error
|
||||
if err == nil {
|
||||
log.Info().
|
||||
Str("enabled_route", prefix.String()).
|
||||
Msg("Route already migrated to new table, skipping")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
route := Route{
|
||||
MachineID: machine.ID,
|
||||
Advertised: true,
|
||||
Enabled: true,
|
||||
Prefix: IPPrefix(prefix),
|
||||
}
|
||||
if err := h.db.Create(&route).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error creating route")
|
||||
} else {
|
||||
log.Info().
|
||||
Uint64("machine_id", route.MachineID).
|
||||
Str("prefix", prefix.String()).
|
||||
Msg("Route migrated")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = db.Migrator().DropColumn(&Machine{}, "enabled_routes")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error dropping enabled_routes column")
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -131,6 +194,11 @@ func (h *Headscale) initDB() error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&PreAuthKeyACLTag{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = db.Migrator().DropTable("shared_machines")
|
||||
|
||||
err = db.AutoMigrate(&APIKey{})
|
||||
@@ -221,8 +289,8 @@ func (h *Headscale) setValue(key string, value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) pingDB() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
func (h *Headscale) pingDB(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
db, err := h.db.DB()
|
||||
if err != nil {
|
||||
@@ -259,7 +327,31 @@ func (hi HostInfo) Value() (driver.Value, error) {
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
type IPPrefixes []netaddr.IPPrefix
|
||||
type IPPrefix netip.Prefix
|
||||
|
||||
func (i *IPPrefix) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case string:
|
||||
prefix, err := netip.ParsePrefix(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = IPPrefix(prefix)
|
||||
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrCannotParsePrefix, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (i IPPrefix) Value() (driver.Value, error) {
|
||||
prefixStr := netip.Prefix(i).String()
|
||||
|
||||
return prefixStr, nil
|
||||
}
|
||||
|
||||
type IPPrefixes []netip.Prefix
|
||||
|
||||
func (i *IPPrefixes) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
|
||||
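For reference, a minimal, self-contained sketch (separate from the diff) of how the new IPPrefix column type above round-trips; the GORM specifics are omitted and only the Scan/Value contract shown in db.go is exercised:

```go
package main

import (
	"database/sql/driver"
	"fmt"
	"net/netip"
)

// IPPrefix mirrors the custom column type added in db.go above:
// it is stored as the prefix's string form and parsed back on load.
type IPPrefix netip.Prefix

func (i *IPPrefix) Scan(destination interface{}) error {
	value, ok := destination.(string)
	if !ok {
		return fmt.Errorf("unexpected data type %T", destination)
	}

	prefix, err := netip.ParsePrefix(value)
	if err != nil {
		return err
	}
	*i = IPPrefix(prefix)

	return nil
}

// Value returns the string form, implementing driver.Valuer.
func (i IPPrefix) Value() (driver.Value, error) {
	return netip.Prefix(i).String(), nil
}

func main() {
	route := IPPrefix(netip.MustParsePrefix("10.0.0.0/24"))

	stored, _ := route.Value() // what would be written to the column

	var loaded IPPrefix
	_ = loaded.Scan(stored) // what would be read back

	fmt.Println(stored, netip.Prefix(loaded)) // 10.0.0.0/24 10.0.0.0/24
}
```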
2 derp.go
@@ -34,7 +34,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), HTTPReadTimeout)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", addr.String(), nil)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -98,10 +99,13 @@ func (h *Headscale) DERPHandler(
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().Caller().Msgf("/derp request from %v", req.RemoteAddr)
|
||||
up := strings.ToLower(req.Header.Get("Upgrade"))
|
||||
if up != "websocket" && up != "derp" {
|
||||
if up != "" {
|
||||
log.Warn().Caller().Msgf("Weird websockets connection upgrade: %q", up)
|
||||
upgrade := strings.ToLower(req.Header.Get("Upgrade"))
|
||||
|
||||
if upgrade != "websocket" && upgrade != "derp" {
|
||||
if upgrade != "" {
|
||||
log.Warn().
|
||||
Caller().
|
||||
Msg("No Upgrade header in DERP server request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
|
||||
}
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusUpgradeRequired)
|
||||
@@ -153,7 +157,7 @@ func (h *Headscale) DERPHandler(
|
||||
|
||||
if !fastStart {
|
||||
pubKey := h.privateKey.Public()
|
||||
pubKeyStr := pubKey.UntypedHexString() // nolint
|
||||
pubKeyStr := pubKey.UntypedHexString() //nolint
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
|
||||
"Upgrade: DERP\r\n"+
|
||||
"Connection: Upgrade\r\n"+
|
||||
@@ -163,7 +167,7 @@ func (h *Headscale) DERPHandler(
|
||||
pubKeyStr)
|
||||
}
|
||||
|
||||
h.DERPServer.tailscaleDERP.Accept(netConn, conn, netConn.RemoteAddr().String())
|
||||
h.DERPServer.tailscaleDERP.Accept(req.Context(), netConn, conn, netConn.RemoteAddr().String())
|
||||
}
|
||||
|
||||
// DERPProbeHandler is the endpoint that js/wasm clients hit to measure
|
||||
@@ -173,7 +177,7 @@ func (h *Headscale) DERPProbeHandler(
|
||||
req *http.Request,
|
||||
) {
|
||||
switch req.Method {
|
||||
case "HEAD", "GET":
|
||||
case http.MethodHead, http.MethodGet:
|
||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
@@ -201,7 +205,7 @@ func (h *Headscale) DERPBootstrapDNSHandler(
|
||||
) {
|
||||
dnsEntries := make(map[string][]net.IP)
|
||||
|
||||
resolvCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
|
||||
defer cancel()
|
||||
var resolver net.Resolver
|
||||
for _, region := range h.DERPMap.Regions {
|
||||
@@ -276,7 +280,8 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
|
||||
continue
|
||||
}
|
||||
|
||||
res := stun.Response(txid, udpAddr.IP, uint16(udpAddr.Port))
|
||||
addr, _ := netip.AddrFromSlice(udpAddr.IP)
|
||||
res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port)))
|
||||
_, err = packetConn.WriteTo(res, udpAddr)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("Issue writing to UDP")
|
||||
|
||||
56 dns.go
@@ -2,11 +2,14 @@ package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
"inet.af/netaddr"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/util/dnsname"
|
||||
)
|
||||
|
||||
@@ -19,6 +22,10 @@ const (
|
||||
ipv6AddressLength = 128
|
||||
)
|
||||
|
||||
const (
|
||||
nextDNSDoHPrefix = "https://dns.nextdns.io"
|
||||
)
|
||||
|
||||
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
|
||||
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS
|
||||
// server (listening in 100.100.100.100 udp/53) should be used for.
|
||||
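Purely as an illustration (not part of this diff), a short sketch of what this function is expected to yield for the default Tailscale range, assuming it is dropped into the headscale package next to dns.go; since a /10 does not end on an octet boundary, one prefix expands into a run of in-addr.arpa zones:

```go
package headscale

import (
	"fmt"
	"net/netip"
)

// printMagicDNSRootDomains is an illustrative sketch only.
func printMagicDNSRootDomains() {
	prefixes := []netip.Prefix{netip.MustParsePrefix("100.64.0.0/10")}

	for _, fqdn := range generateMagicDNSRootDomains(prefixes) {
		fmt.Println(fqdn)
	}
	// Expected to include 64.100.in-addr.arpa. through 127.100.in-addr.arpa.
}
```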
@@ -39,11 +46,11 @@ const (
|
||||
|
||||
// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).
|
||||
// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netip.Prefix) []dnsname.FQDN {
|
||||
fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes))
|
||||
for _, ipPrefix := range ipPrefixes {
|
||||
var generateDNSRoot func(netaddr.IPPrefix) []dnsname.FQDN
|
||||
switch ipPrefix.IP().BitLen() {
|
||||
var generateDNSRoot func(netip.Prefix) []dnsname.FQDN
|
||||
switch ipPrefix.Addr().BitLen() {
|
||||
case ipv4AddressLength:
|
||||
generateDNSRoot = generateIPv4DNSRootDomain
|
||||
|
||||
@@ -54,7 +61,7 @@ func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
panic(
|
||||
fmt.Sprintf(
|
||||
"unsupported IP version with address length %d",
|
||||
ipPrefix.IP().BitLen(),
|
||||
ipPrefix.Addr().BitLen(),
|
||||
),
|
||||
)
|
||||
}
|
||||
@@ -65,9 +72,9 @@ func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
func generateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
|
||||
// Conversion to the std lib net.IPnet, a bit easier to operate
|
||||
netRange := ipPrefix.IPNet()
|
||||
netRange := netipx.PrefixIPNet(ipPrefix)
|
||||
maskBits, _ := netRange.Mask.Size()
|
||||
|
||||
// lastOctet is the last IP byte covered by the mask
|
||||
@@ -101,11 +108,11 @@ func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv6DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
|
||||
const nibbleLen = 4
|
||||
|
||||
maskBits, _ := ipPrefix.IPNet().Mask.Size()
|
||||
expanded := ipPrefix.IP().StringExpanded()
|
||||
maskBits, _ := netipx.PrefixIPNet(ipPrefix).Mask.Size()
|
||||
expanded := ipPrefix.Addr().StringExpanded()
|
||||
nibbleStr := strings.Map(func(r rune) rune {
|
||||
if r == ':' {
|
||||
return -1
|
||||
@@ -151,16 +158,39 @@ func generateIPv6DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
return fqdns
|
||||
}
|
||||
|
||||
// If any nextdns DoH resolvers are present in the list of resolvers it will
|
||||
// take metadata from the machine metadata and instruct tailscale to add it
|
||||
// to the requests. This makes it possible to identify from which device the
|
||||
// requests come in the NextDNS dashboard.
|
||||
//
|
||||
// This will produce a resolver like:
|
||||
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
|
||||
func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine Machine) {
|
||||
for _, resolver := range resolvers {
|
||||
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
|
||||
attrs := url.Values{
|
||||
"device_name": []string{machine.Hostname},
|
||||
"device_model": []string{machine.HostInfo.OS},
|
||||
}
|
||||
|
||||
if len(machine.IPAddresses) > 0 {
|
||||
attrs.Add("device_ip", machine.IPAddresses[0].String())
|
||||
}
|
||||
|
||||
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
|
||||
}
|
||||
}
|
||||
}
|
||||
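For reference, a small sketch (not part of this change) of the URL the function above produces, assuming it is placed in the headscale package so it can use addNextDNSMetadata and the Machine type directly; "abc123" is a placeholder NextDNS ID:

```go
package headscale

import (
	"fmt"
	"net/netip"

	"tailscale.com/types/dnstype"
)

// exampleNextDNSMetadata is an illustrative sketch only.
func exampleNextDNSMetadata() {
	resolvers := []*dnstype.Resolver{
		{Addr: "https://dns.nextdns.io/abc123"},
	}

	machine := Machine{
		Hostname:    "node-name",
		HostInfo:    HostInfo{OS: "linux"},
		IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
	}

	addNextDNSMetadata(resolvers, machine)

	// Query parameters are appended in sorted key order:
	// https://dns.nextdns.io/abc123?device_ip=100.64.0.1&device_model=linux&device_name=node-name
	fmt.Println(resolvers[0].Addr)
}
```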
|
||||
func getMapResponseDNSConfig(
|
||||
dnsConfigOrig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
machine Machine,
|
||||
peers Machines,
|
||||
) *tailcfg.DNSConfig {
|
||||
var dnsConfig *tailcfg.DNSConfig
|
||||
var dnsConfig *tailcfg.DNSConfig = dnsConfigOrig.Clone()
|
||||
if dnsConfigOrig != nil && dnsConfigOrig.Proxied { // if MagicDNS is enabled
|
||||
// Only inject the Search Domain of the current namespace - shared nodes should use their full FQDN
|
||||
dnsConfig = dnsConfigOrig.Clone()
|
||||
dnsConfig.Domains = append(
|
||||
dnsConfig.Domains,
|
||||
fmt.Sprintf(
|
||||
@@ -183,5 +213,7 @@ func getMapResponseDNSConfig(
|
||||
dnsConfig = dnsConfigOrig
|
||||
}
|
||||
|
||||
addNextDNSMetadata(dnsConfig.Resolvers, machine)
|
||||
|
||||
return dnsConfig
|
||||
}
|
||||
|
||||
42 dns_test.go
@@ -2,16 +2,16 @@ package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("100.64.0.0/10"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("100.64.0.0/10"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -47,8 +47,8 @@ func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("172.16.0.0/16"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -75,8 +75,8 @@ func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
|
||||
// Happens when netmask is a multiple of 4 bits (sounds likely).
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -89,8 +89,8 @@ func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6SingleMultiple(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/50"),
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("fd7a:115c:a1e0::/50"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
@@ -126,6 +126,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -134,6 +135,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -142,6 +144,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -150,6 +153,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -165,7 +169,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
@@ -182,7 +186,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
@@ -199,7 +203,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
@@ -216,7 +220,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
@@ -269,6 +273,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -277,6 +282,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -285,6 +291,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -293,6 +300,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
@@ -308,7 +316,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
@@ -325,7 +333,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
@@ -342,7 +350,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
@@ -359,7 +367,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
@@ -28,6 +28,7 @@ written by community members. It is _not_ verified by `headscale` developers.
|
||||
|
||||
- [Running headscale in a container](running-headscale-container.md)
|
||||
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
|
||||
- [Running headscale behind a reverse proxy](reverse-proxy.md)
|
||||
|
||||
## Misc
|
||||
|
||||
|
||||
BIN docs/logo/headscale3-dots.pdf (new file, binary)
BIN docs/logo/headscale3-dots.png (new file, binary, 34 KiB)
BIN docs/logo/headscale3_header_stacked_left.pdf (new file, binary)
BIN docs/logo/headscale3_header_stacked_left.png (new file, binary, 49 KiB)
48 docs/proposals/002-better-routing.md (new file)
@@ -0,0 +1,48 @@
|
||||
# Better route management
|
||||
|
||||
As of today, route management in Headscale is very basic and does not allow for much flexibility, which prevents us from implementing subnet HA, 4via6 or more advanced features. We also have a number of bugs (e.g., routes exposed by ephemeral nodes).
|
||||
|
||||
This proposal aims to improve the route management.
|
||||
|
||||
## Current situation
|
||||
|
||||
Routes advertised by the nodes are read from the Hostinfo struct. If approved from the CLI or via autoApprovers, the route is added to the EnabledRoutes field in `Machine`.
|
||||
|
||||
This means that the advertised routes are not persisted in the database, as Hostinfo is always replaced. In the same way, EnabledRoutes can get out of sync with the actual routes in the node.
|
||||
|
||||
In case of colliding routes (i.e., subnets that are exposed from multiple nodes), we are currently just sending all of them in `PrimaryRoutes`... and hope for the best. (`PrimaryRoutes` is the field in `Node` used for subnet failover).
|
||||
|
||||
## Proposal
|
||||
|
||||
The core part is to create a new `Route` struct (and DB table), with the following fields:
|
||||
|
||||
```go
|
||||
type Route struct {
|
||||
ID uint64 `gorm:"primary_key"`
|
||||
|
||||
Machine *Machine
|
||||
Prefix IPPrefix
|
||||
|
||||
Advertised bool
|
||||
Enabled bool
|
||||
IsPrimary bool
|
||||
|
||||
|
||||
CreatedAt *time.Time
|
||||
UpdatedAt *time.Time
|
||||
DeletedAt *time.Time
|
||||
}
|
||||
```
|
||||
|
||||
- The `Advertised` field is set to true if the route is being advertised by the node. It is set to false if the route is removed. This way we can indicate when a previously enabled route has stopped being advertised. A similar behaviour happens in the Tailscale.com control panel.
|
||||
|
||||
- The `Enabled` field is set to true if the route is enabled - via CLI or autoApprovers.
|
||||
|
||||
- `IsPrimary` indicates if Headscale has selected this route as the primary route for that particular subnet. This allows us to implement subnet failover. This would be fully automatic if there is more than one subnet router advertising the same network - which is the behaviour of Tailscale.com.
|
||||
|
||||
## Stuff to bear in mind
|
||||
|
||||
- We need to make sure to migrate the current `EnabledRoutes` of `Machine` into the new table.
|
||||
- When a node stops sharing a subnet, I reckon we should mark it both as not `Advertised` and not `Enabled`. Users should re-enable it if the node advertises it again.
|
||||
- If only one subnet router is advertising a subnet, we should mark it as primary.
|
||||
- Regarding subnet failover, the current behaviour of Tailscale.com is to perform the failover 15 seconds after the node disconnects from their control panel. I reckon we cannot do the same currently; our maximum granularity is the keep alive period. A rough sketch of a possible primary-selection pass is shown after this list.
|
||||
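To make the failover idea above concrete, here is a rough sketch of a primary-selection pass over the proposed `Route` table. It is not part of the proposal's required implementation; it assumes the `Route` struct above plus the `MachineID` foreign key used in the migration code earlier in this diff, and `isOnline` stands in for whatever liveness signal Headscale ends up using:

```go
// ensurePrimaryFor picks (or keeps) a primary route among the routes
// advertising one particular prefix. Hypothetical helper for illustration.
func ensurePrimaryFor(routes []Route, isOnline func(machineID uint64) bool) []Route {
	currentPrimary := -1
	firstCandidate := -1

	for i, route := range routes {
		// Only advertised, enabled routes on reachable nodes qualify.
		if !route.Advertised || !route.Enabled || !isOnline(route.MachineID) {
			continue
		}
		if firstCandidate == -1 {
			firstCandidate = i
		}
		if route.IsPrimary {
			currentPrimary = i
		}
	}

	// Keep the existing primary if its node is still online; otherwise
	// fail over to the first healthy candidate (if any).
	chosen := currentPrimary
	if chosen == -1 {
		chosen = firstCandidate
	}

	for i := range routes {
		routes[i].IsPrimary = i == chosen
	}

	return routes
}
```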
@@ -43,7 +43,7 @@ headscale apikeys expire --prefix "<PREFIX>"
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headcale`
|
||||
2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
|
||||
3. Make `headscale` executable:
|
||||
|
||||
|
||||
100 docs/reverse-proxy.md (new file)
@@ -0,0 +1,100 @@
|
||||
# Running headscale behind a reverse proxy
|
||||
|
||||
Running headscale behind a reverse proxy is useful when running multiple applications on the same server, and you want to reuse the same external IP and port - usually tcp/443 for HTTPS.
|
||||
|
||||
### WebSockets
|
||||
|
||||
The reverse proxy MUST be configured to support WebSockets, as it is needed for clients running Tailscale v1.30+.
|
||||
|
||||
WebSockets support is required when using the headscale embedded DERP server. In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml).
|
||||
|
||||
### TLS
|
||||
|
||||
Headscale can be configured not to use TLS, leaving it to the reverse proxy to handle. Add the following configuration values to your headscale config file.
|
||||
|
||||
```yaml
|
||||
server_url: https://<YOUR_SERVER_NAME> # This should be the FQDN at which headscale will be served
|
||||
listen_addr: 0.0.0.0:8080
|
||||
metrics_listen_addr: 0.0.0.0:9090
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
```
|
||||
|
||||
## nginx
|
||||
|
||||
The following example configuration can be used in your nginx setup, substituting values as necessary. `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `http://localhost:8080`.
|
||||
|
||||
```Nginx
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default keep-alive;
|
||||
'websocket' upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
|
||||
server_name <YOUR_SERVER_NAME>;
|
||||
|
||||
ssl_certificate <PATH_TO_CERT>;
|
||||
ssl_certificate_key <PATH_CERT_KEY>;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
|
||||
location / {
|
||||
proxy_pass http://<IP:PORT>;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Host $server_name;
|
||||
proxy_redirect http:// https://;
|
||||
proxy_buffering off;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
|
||||
add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## istio/envoy
|
||||
|
||||
If you are using [Istio](https://istio.io/) ingressgateway or [Envoy](https://www.envoyproxy.io/) as a reverse proxy, there are some tips for you. If this is not set up, you may see debug logs in the proxy like the one below:
|
||||
|
||||
```log
|
||||
Sending local reply with details upgrade_failed
|
||||
```
|
||||
|
||||
### Envoy
|
||||
|
||||
You need to add a new upgrade_type named `tailscale-control-protocol`. [See details](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-upgradeconfig)
|
||||
|
||||
### Istio
|
||||
|
||||
As with Envoy, we can use an `EnvoyFilter` to add the upgrade_type.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: EnvoyFilter
|
||||
metadata:
|
||||
name: headscale-behind-istio-ingress
|
||||
namespace: istio-system
|
||||
spec:
|
||||
configPatches:
|
||||
- applyTo: NETWORK_FILTER
|
||||
match:
|
||||
listener:
|
||||
filterChain:
|
||||
filter:
|
||||
name: envoy.filters.network.http_connection_manager
|
||||
patch:
|
||||
operation: MERGE
|
||||
value:
|
||||
typed_config:
|
||||
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
|
||||
upgrade_configs:
|
||||
- upgrade_type: tailscale-control-protocol
|
||||
```
|
||||
@@ -48,12 +48,17 @@ Modify the config file to your preferences before launching Docker container.
|
||||
Here are some settings that you likely want:
|
||||
|
||||
```yaml
|
||||
server_url: http://your-host-name:8080 # Change to your hostname or host IP
|
||||
# Change to your hostname or host IP
|
||||
server_url: http://your-host-name:8080
|
||||
# Listen to 0.0.0.0 so it's accessible outside the container
|
||||
metrics_listen_addr: 0.0.0.0:9090
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
private_key_path: /etc/headscale/private.key
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
noise:
|
||||
private_key_path: /etc/headscale/noise_private.key
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
db_type: sqlite3
|
||||
db_path: /etc/headscale/db.sqlite
|
||||
```
|
||||
|
||||
@@ -63,7 +68,6 @@ db_path: /etc/headscale/db.sqlite
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--rm \
|
||||
--volume $(pwd)/config:/etc/headscale/ \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
--publish 127.0.0.1:9090:9090 \
|
||||
|
||||
@@ -17,7 +17,7 @@ describing how to make `headscale` run properly in a server environment.
|
||||
|
||||
```shell
|
||||
# Install prerequisites
|
||||
# 1. go v1.18+: headscale newer than 0.15 needs go 1.18+ to compile
|
||||
# 1. go v1.19+: headscale newer than 0.17 needs go 1.19+ to compile
|
||||
# 2. gmake: Makefile in the headscale repo is written in GNU make syntax
|
||||
pkg_add -D snap go
|
||||
pkg_add gmake
|
||||
@@ -46,7 +46,7 @@ cp headscale /usr/local/sbin
|
||||
|
||||
```shell
|
||||
# Install prerequisites
|
||||
# 1. go v1.18+: headscale newer than 0.15 needs go 1.18+ to compile
|
||||
# 1. go v1.19+: headscale newer than 0.17 needs go 1.19+ to compile
|
||||
# 2. gmake: Makefile in the headscale repo is written in GNU make syntax
|
||||
|
||||
git clone https://github.com/juanfont/headscale.git
|
||||
|
||||
14 docs/tls.md
@@ -29,17 +29,3 @@ headscale can also be configured to expose its web service via TLS. To configure
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
```
|
||||
|
||||
### Configuring Mutual TLS Authentication (mTLS)
|
||||
|
||||
mTLS is a method by which an HTTPS server authenticates clients, e.g. Tailscale, using TLS certificates. This can be configured by applying one of the following values to the `tls_client_auth_mode` setting in the configuration file.
|
||||
|
||||
| Value | Behavior |
|
||||
| ------------------- | ---------------------------------------------------------- |
|
||||
| `disabled` | Disable mTLS. |
|
||||
| `relaxed` (default) | A client certificate is required, but it is not verified. |
|
||||
| `enforced` | Requires clients to supply a certificate that is verified. |
|
||||
|
||||
```yaml
|
||||
tls_client_auth_mode: ""
|
||||
```
|
||||
|
||||
14 flake.lock (generated)
@@ -2,11 +2,11 @@
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"locked": {
|
||||
"lastModified": 1653893745,
|
||||
"narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
|
||||
"lastModified": 1667395993,
|
||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
|
||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -17,16 +17,16 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1654847188,
|
||||
"narHash": "sha256-MC+eP7XOGE1LAswOPqdcGoUqY9mEQ3ZaaxamVTbc0hM=",
|
||||
"lastModified": 1670064435,
|
||||
"narHash": "sha256-+ELoY30UN+Pl3Yn7RWRPabykwebsVK/kYE9JsIsUMxQ=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "8b66e3f2ebcc644b78cec9d6f152192f4e7d322f",
|
||||
"rev": "61a8a98e6d557e6dd7ed0cdb54c3a3e3bbc5e25c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-22.05",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
|
||||
294 flake.nix
@@ -2,167 +2,161 @@
|
||||
description = "headscale - Open Source Tailscale Control server";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.05";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, flake-utils, ... }:
|
||||
let
|
||||
headscaleVersion = if (self ? shortRev) then self.shortRev else "dev";
|
||||
in
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
...
|
||||
}: let
|
||||
headscaleVersion =
|
||||
if (self ? shortRev)
|
||||
then self.shortRev
|
||||
else "dev";
|
||||
in
|
||||
{
|
||||
overlay = final: prev:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
in
|
||||
rec {
|
||||
headscale =
|
||||
pkgs.buildGo118Module rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
in rec {
|
||||
headscale = pkgs.buildGo119Module rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorSha256 = "sha256-RzmnAh81BN4tbzAGzJbb6CMuws8kuPJDw7aPkRRnSS8=";
|
||||
tags = ["ts2019"];
|
||||
|
||||
ldflags = [ "-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}" ];
|
||||
};
|
||||
# Only run unit tests when testing a build
|
||||
checkFlags = ["-short"];
|
||||
|
||||
golines =
|
||||
pkgs.buildGoModule rec {
|
||||
pname = "golines";
|
||||
version = "0.9.0";
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorSha256 = "sha256-SuKT+b8g6xEK15ry2IAmpS/vwDG+zJqK9nfsWpHNXuU=";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "segmentio";
|
||||
repo = "golines";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-BUXEg+4r9L/gqe4DhTlhN55P3jWt7ZyWFQycO6QePrw=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-sEzWUeVk5GB0H41wrp12P8sBWRjg0FHUX6ABDEEBqK8=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
};
|
||||
|
||||
golangci-lint = prev.golangci-lint.override {
|
||||
# Override https://github.com/NixOS/nixpkgs/pull/166801 which changed this
|
||||
# to buildGo118Module because it does not build on Darwin.
|
||||
inherit (prev) buildGoModule;
|
||||
};
|
||||
|
||||
# golangci-lint =
|
||||
# pkgs.buildGo117Module rec {
|
||||
# pname = "golangci-lint";
|
||||
# version = "1.46.2";
|
||||
#
|
||||
# src = pkgs.fetchFromGitHub {
|
||||
# owner = "golangci";
|
||||
# repo = "golangci-lint";
|
||||
# rev = "v${version}";
|
||||
# sha256 = "sha256-7sDAwWz+qoB/ngeH35tsJ5FZUfAQvQsU6kU9rUHIHMk=";
|
||||
# };
|
||||
#
|
||||
# vendorSha256 = "sha256-w38OKN6HPoz37utG/2QSPMai55IRDXCIIymeMe6ogIU=";
|
||||
#
|
||||
# nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
# };
|
||||
|
||||
protoc-gen-grpc-gateway =
|
||||
pkgs.buildGoModule rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.8.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-8eBBBYJ+tBjB2fgPMX/ZlbN3eeS75e8TAZYOKXs6hcg=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-AW2Gn/mlZyLMwF+NpK59eiOmQrYWW/9HPjbunYc9Ij4=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
subPackages = [ "protoc-gen-grpc-gateway" "protoc-gen-openapiv2" ];
|
||||
};
|
||||
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
|
||||
};
|
||||
} // flake-utils.lib.eachDefaultSystem
|
||||
(system:
|
||||
let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [ self.overlay ];
|
||||
inherit system;
|
||||
|
||||
golines = pkgs.buildGoModule rec {
|
||||
pname = "golines";
|
||||
version = "0.11.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "segmentio";
|
||||
repo = "golines";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
|
||||
};
|
||||
buildDeps = with pkgs; [ git go_1_18 gnumake ];
|
||||
devDeps = with pkgs;
|
||||
buildDeps ++ [
|
||||
|
||||
vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
|
||||
|
||||
nativeBuildInputs = [pkgs.installShellFiles];
|
||||
};
|
||||
|
||||
golangci-lint = prev.golangci-lint.override {
|
||||
# Override https://github.com/NixOS/nixpkgs/pull/166801 which changed this
|
||||
# to buildGo118Module because it does not build on Darwin.
|
||||
inherit (prev) buildGoModule;
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = pkgs.buildGoModule rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.14.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-lnNdsDCpeSHtl2lC1IhUw11t3cnGF+37qSM7HDvKLls=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";
|
||||
|
||||
nativeBuildInputs = [pkgs.installShellFiles];
|
||||
|
||||
subPackages = ["protoc-gen-grpc-gateway" "protoc-gen-openapiv2"];
|
||||
};
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
(system: let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_19 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golines
|
||||
nodePackages.prettier
|
||||
goreleaser
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [pkgs.headscale];
|
||||
config.Entrypoint = [(pkgs.headscale + "/bin/headscale")];
|
||||
};
|
||||
in rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell {
|
||||
buildInputs = devDeps;
|
||||
|
||||
shellHook = ''
|
||||
export GOFLAGS=-tags="ts2019"
|
||||
'';
|
||||
};
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
apps.default = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format =
|
||||
pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
golines
|
||||
nodePackages.prettier
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
golines
|
||||
clang-tools
|
||||
];
|
||||
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [ pkgs.headscale ];
|
||||
config.Entrypoint = [ (pkgs.headscale + "/bin/headscale") ];
|
||||
};
|
||||
in
|
||||
rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell { buildInputs = devDeps; };
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
defaultApp = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format = pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
nodePackages.prettier
|
||||
golines
|
||||
clang-tools
|
||||
];
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
});
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
|
||||
'';
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.27.1
// 	protoc-gen-go v1.28.1
// 	protoc (unknown)
// source: headscale/v1/apikey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.27.1
// 	protoc-gen-go v1.28.1
// 	protoc (unknown)
// source: headscale/v1/device.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.27.1
// 	protoc-gen-go v1.28.1
// 	protoc (unknown)
// source: headscale/v1/headscale.proto
@@ -36,7 +36,7 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xb1, 0x16, 0x0a, 0x10, 0x48, 0x65,
|
||||
0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x81, 0x18, 0x0a, 0x10, 0x48, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77,
|
||||
0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
|
||||
@@ -51,9 +51,9 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x22,
|
||||
0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x3a,
|
||||
0x01, 0x2a, 0x22, 0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
@@ -85,17 +85,17 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75,
|
||||
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x87, 0x01, 0x0a,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x87, 0x01, 0x0a,
|
||||
0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x7a, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72,
|
||||
0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x22, 0x19, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2f,
|
||||
0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
@@ -110,8 +110,8 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x73, 0x74, 0x1a, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65,
|
||||
0x62, 0x75, 0x67, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x75,
|
||||
0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x75,
|
||||
0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
@@ -124,9 +124,9 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
|
||||
0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x0f,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x12, 0x80, 0x01, 0x0a, 0x0f,
|
||||
0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12,
|
||||
0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65,
|
||||
@@ -175,24 +175,37 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x22, 0x26, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x8b,
|
||||
0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x97, 0x01, 0x0a,
|
||||
0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x64,
|
||||
0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22,
|
||||
0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69,
|
||||
0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x25, 0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
@@ -200,16 +213,16 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x13, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x6b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69,
|
||||
0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70,
|
||||
0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x3a, 0x01,
|
||||
0x2a, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72,
|
||||
0x65, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
@@ -222,50 +235,54 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
}
|
||||
|
||||
var file_headscale_v1_headscale_proto_goTypes = []interface{}{
|
||||
(*GetNamespaceRequest)(nil), // 0: headscale.v1.GetNamespaceRequest
|
||||
(*CreateNamespaceRequest)(nil), // 1: headscale.v1.CreateNamespaceRequest
|
||||
(*RenameNamespaceRequest)(nil), // 2: headscale.v1.RenameNamespaceRequest
|
||||
(*DeleteNamespaceRequest)(nil), // 3: headscale.v1.DeleteNamespaceRequest
|
||||
(*ListNamespacesRequest)(nil), // 4: headscale.v1.ListNamespacesRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 5: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 6: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateMachineRequest)(nil), // 8: headscale.v1.DebugCreateMachineRequest
|
||||
(*GetMachineRequest)(nil), // 9: headscale.v1.GetMachineRequest
|
||||
(*SetTagsRequest)(nil), // 10: headscale.v1.SetTagsRequest
|
||||
(*RegisterMachineRequest)(nil), // 11: headscale.v1.RegisterMachineRequest
|
||||
(*DeleteMachineRequest)(nil), // 12: headscale.v1.DeleteMachineRequest
|
||||
(*ExpireMachineRequest)(nil), // 13: headscale.v1.ExpireMachineRequest
|
||||
(*RenameMachineRequest)(nil), // 14: headscale.v1.RenameMachineRequest
|
||||
(*ListMachinesRequest)(nil), // 15: headscale.v1.ListMachinesRequest
|
||||
(*MoveMachineRequest)(nil), // 16: headscale.v1.MoveMachineRequest
|
||||
(*GetMachineRouteRequest)(nil), // 17: headscale.v1.GetMachineRouteRequest
|
||||
(*EnableMachineRoutesRequest)(nil), // 18: headscale.v1.EnableMachineRoutesRequest
|
||||
(*CreateApiKeyRequest)(nil), // 19: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 20: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 21: headscale.v1.ListApiKeysRequest
|
||||
(*GetNamespaceResponse)(nil), // 22: headscale.v1.GetNamespaceResponse
|
||||
(*CreateNamespaceResponse)(nil), // 23: headscale.v1.CreateNamespaceResponse
|
||||
(*RenameNamespaceResponse)(nil), // 24: headscale.v1.RenameNamespaceResponse
|
||||
(*DeleteNamespaceResponse)(nil), // 25: headscale.v1.DeleteNamespaceResponse
|
||||
(*ListNamespacesResponse)(nil), // 26: headscale.v1.ListNamespacesResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 27: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 28: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 29: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateMachineResponse)(nil), // 30: headscale.v1.DebugCreateMachineResponse
|
||||
(*GetMachineResponse)(nil), // 31: headscale.v1.GetMachineResponse
|
||||
(*SetTagsResponse)(nil), // 32: headscale.v1.SetTagsResponse
|
||||
(*RegisterMachineResponse)(nil), // 33: headscale.v1.RegisterMachineResponse
|
||||
(*DeleteMachineResponse)(nil), // 34: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineResponse)(nil), // 35: headscale.v1.ExpireMachineResponse
|
||||
(*RenameMachineResponse)(nil), // 36: headscale.v1.RenameMachineResponse
|
||||
(*ListMachinesResponse)(nil), // 37: headscale.v1.ListMachinesResponse
|
||||
(*MoveMachineResponse)(nil), // 38: headscale.v1.MoveMachineResponse
|
||||
(*GetMachineRouteResponse)(nil), // 39: headscale.v1.GetMachineRouteResponse
|
||||
(*EnableMachineRoutesResponse)(nil), // 40: headscale.v1.EnableMachineRoutesResponse
|
||||
(*CreateApiKeyResponse)(nil), // 41: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 42: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 43: headscale.v1.ListApiKeysResponse
|
||||
(*GetNamespaceRequest)(nil), // 0: headscale.v1.GetNamespaceRequest
|
||||
(*CreateNamespaceRequest)(nil), // 1: headscale.v1.CreateNamespaceRequest
|
||||
(*RenameNamespaceRequest)(nil), // 2: headscale.v1.RenameNamespaceRequest
|
||||
(*DeleteNamespaceRequest)(nil), // 3: headscale.v1.DeleteNamespaceRequest
|
||||
(*ListNamespacesRequest)(nil), // 4: headscale.v1.ListNamespacesRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 5: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 6: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateMachineRequest)(nil), // 8: headscale.v1.DebugCreateMachineRequest
|
||||
(*GetMachineRequest)(nil), // 9: headscale.v1.GetMachineRequest
|
||||
(*SetTagsRequest)(nil), // 10: headscale.v1.SetTagsRequest
|
||||
(*RegisterMachineRequest)(nil), // 11: headscale.v1.RegisterMachineRequest
|
||||
(*DeleteMachineRequest)(nil), // 12: headscale.v1.DeleteMachineRequest
|
||||
(*ExpireMachineRequest)(nil), // 13: headscale.v1.ExpireMachineRequest
|
||||
(*RenameMachineRequest)(nil), // 14: headscale.v1.RenameMachineRequest
|
||||
(*ListMachinesRequest)(nil), // 15: headscale.v1.ListMachinesRequest
|
||||
(*MoveMachineRequest)(nil), // 16: headscale.v1.MoveMachineRequest
|
||||
(*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest
|
||||
(*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest
|
||||
(*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest
|
||||
(*GetMachineRoutesRequest)(nil), // 20: headscale.v1.GetMachineRoutesRequest
|
||||
(*CreateApiKeyRequest)(nil), // 21: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 22: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 23: headscale.v1.ListApiKeysRequest
|
||||
(*GetNamespaceResponse)(nil), // 24: headscale.v1.GetNamespaceResponse
|
||||
(*CreateNamespaceResponse)(nil), // 25: headscale.v1.CreateNamespaceResponse
|
||||
(*RenameNamespaceResponse)(nil), // 26: headscale.v1.RenameNamespaceResponse
|
||||
(*DeleteNamespaceResponse)(nil), // 27: headscale.v1.DeleteNamespaceResponse
|
||||
(*ListNamespacesResponse)(nil), // 28: headscale.v1.ListNamespacesResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 29: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 30: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 31: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateMachineResponse)(nil), // 32: headscale.v1.DebugCreateMachineResponse
|
||||
(*GetMachineResponse)(nil), // 33: headscale.v1.GetMachineResponse
|
||||
(*SetTagsResponse)(nil), // 34: headscale.v1.SetTagsResponse
|
||||
(*RegisterMachineResponse)(nil), // 35: headscale.v1.RegisterMachineResponse
|
||||
(*DeleteMachineResponse)(nil), // 36: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineResponse)(nil), // 37: headscale.v1.ExpireMachineResponse
|
||||
(*RenameMachineResponse)(nil), // 38: headscale.v1.RenameMachineResponse
|
||||
(*ListMachinesResponse)(nil), // 39: headscale.v1.ListMachinesResponse
|
||||
(*MoveMachineResponse)(nil), // 40: headscale.v1.MoveMachineResponse
|
||||
(*GetRoutesResponse)(nil), // 41: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteResponse)(nil), // 42: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteResponse)(nil), // 43: headscale.v1.DisableRouteResponse
|
||||
(*GetMachineRoutesResponse)(nil), // 44: headscale.v1.GetMachineRoutesResponse
|
||||
(*CreateApiKeyResponse)(nil), // 45: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 46: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 47: headscale.v1.ListApiKeysResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.HeadscaleService.GetNamespace:input_type -> headscale.v1.GetNamespaceRequest
|
||||
@@ -285,35 +302,39 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
14, // 14: headscale.v1.HeadscaleService.RenameMachine:input_type -> headscale.v1.RenameMachineRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.ListMachines:input_type -> headscale.v1.ListMachinesRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.MoveMachine:input_type -> headscale.v1.MoveMachineRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.GetMachineRoute:input_type -> headscale.v1.GetMachineRouteRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.EnableMachineRoutes:input_type -> headscale.v1.EnableMachineRoutesRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.GetNamespace:output_type -> headscale.v1.GetNamespaceResponse
|
||||
23, // 23: headscale.v1.HeadscaleService.CreateNamespace:output_type -> headscale.v1.CreateNamespaceResponse
|
||||
24, // 24: headscale.v1.HeadscaleService.RenameNamespace:output_type -> headscale.v1.RenameNamespaceResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.DeleteNamespace:output_type -> headscale.v1.DeleteNamespaceResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.ListNamespaces:output_type -> headscale.v1.ListNamespacesResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.RenameMachine:output_type -> headscale.v1.RenameMachineResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.MoveMachine:output_type -> headscale.v1.MoveMachineResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.GetMachineRoute:output_type -> headscale.v1.GetMachineRouteResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.EnableMachineRoutes:output_type -> headscale.v1.EnableMachineRoutesResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
22, // [22:44] is the sub-list for method output_type
|
||||
0, // [0:22] is the sub-list for method input_type
|
||||
17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.GetMachineRoutes:input_type -> headscale.v1.GetMachineRoutesRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
23, // 23: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
24, // 24: headscale.v1.HeadscaleService.GetNamespace:output_type -> headscale.v1.GetNamespaceResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.CreateNamespace:output_type -> headscale.v1.CreateNamespaceResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.RenameNamespace:output_type -> headscale.v1.RenameNamespaceResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.DeleteNamespace:output_type -> headscale.v1.DeleteNamespaceResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.ListNamespaces:output_type -> headscale.v1.ListNamespacesResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.RenameMachine:output_type -> headscale.v1.RenameMachineResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.MoveMachine:output_type -> headscale.v1.MoveMachineResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
|
||||
44, // 44: headscale.v1.HeadscaleService.GetMachineRoutes:output_type -> headscale.v1.GetMachineRoutesResponse
|
||||
45, // 45: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
46, // 46: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
47, // 47: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
24, // [24:48] is the sub-list for method output_type
|
||||
0, // [0:24] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,4 +1,8 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
package v1
|
||||
|
||||
@@ -39,8 +43,10 @@ type HeadscaleServiceClient interface {
|
||||
ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error)
|
||||
MoveMachine(ctx context.Context, in *MoveMachineRequest, opts ...grpc.CallOption) (*MoveMachineResponse, error)
|
||||
// --- Route start ---
|
||||
GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error)
|
||||
EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error)
|
||||
GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error)
|
||||
EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error)
|
||||
DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error)
|
||||
GetMachineRoutes(ctx context.Context, in *GetMachineRoutesRequest, opts ...grpc.CallOption) (*GetMachineRoutesResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
|
||||
@@ -208,18 +214,36 @@ func (c *headscaleServiceClient) MoveMachine(ctx context.Context, in *MoveMachin
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error) {
|
||||
out := new(GetMachineRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachineRoute", in, out, opts...)
|
||||
func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) {
|
||||
out := new(GetRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetRoutes", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error) {
|
||||
out := new(EnableMachineRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/EnableMachineRoutes", in, out, opts...)
|
||||
func (c *headscaleServiceClient) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) {
|
||||
out := new(EnableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/EnableRoute", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error) {
|
||||
out := new(DisableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DisableRoute", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachineRoutes(ctx context.Context, in *GetMachineRoutesRequest, opts ...grpc.CallOption) (*GetMachineRoutesResponse, error) {
|
||||
out := new(GetMachineRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachineRoutes", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -278,8 +302,10 @@ type HeadscaleServiceServer interface {
|
||||
ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error)
|
||||
MoveMachine(context.Context, *MoveMachineRequest) (*MoveMachineResponse, error)
|
||||
// --- Route start ---
|
||||
GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error)
|
||||
EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error)
|
||||
GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error)
|
||||
EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error)
|
||||
DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error)
|
||||
GetMachineRoutes(context.Context, *GetMachineRoutesRequest) (*GetMachineRoutesResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
|
||||
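The renamed route RPCs are implemented on the server the same way as the rest of the service: embed `UnimplementedHeadscaleServiceServer` and override the methods you support, letting everything else return `codes.Unimplemented`. A minimal sketch, assuming the generated package import path `github.com/juanfont/headscale/gen/go/headscale/v1` and a placeholder listen address:

```go
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

// routesServer overrides only GetRoutes; every other method falls back to
// the generated Unimplemented stubs shown above.
type routesServer struct {
	v1.UnimplementedHeadscaleServiceServer
}

func (s *routesServer) GetRoutes(
	ctx context.Context,
	req *v1.GetRoutesRequest,
) (*v1.GetRoutesResponse, error) {
	// A real implementation would load routes from the database; an empty
	// response keeps this sketch self-contained.
	return &v1.GetRoutesResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50443")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	v1.RegisterHeadscaleServiceServer(srv, &routesServer{})
	_ = srv.Serve(lis)
}
```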
@@ -342,11 +368,17 @@ func (UnimplementedHeadscaleServiceServer) ListMachines(context.Context, *ListMa
|
||||
func (UnimplementedHeadscaleServiceServer) MoveMachine(context.Context, *MoveMachineRequest) (*MoveMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method MoveMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachineRoute not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method EnableMachineRoutes not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method EnableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DisableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachineRoutes(context.Context, *GetMachineRoutesRequest) (*GetMachineRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachineRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
@@ -676,38 +708,74 @@ func _HeadscaleService_MoveMachine_Handler(srv interface{}, ctx context.Context,
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachineRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRouteRequest)
|
||||
func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoute(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachineRoute",
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetRoutes",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoute(ctx, req.(*GetMachineRouteRequest))
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, req.(*GetRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_EnableMachineRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EnableMachineRoutesRequest)
|
||||
func _HeadscaleService_EnableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EnableRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).EnableMachineRoutes(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/EnableMachineRoutes",
|
||||
FullMethod: "/headscale.v1.HeadscaleService/EnableRoute",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).EnableMachineRoutes(ctx, req.(*EnableMachineRoutesRequest))
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, req.(*EnableRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DisableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DisableRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DisableRoute",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, req.(*DisableRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachineRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachineRoutes",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoutes(ctx, req.(*GetMachineRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -842,12 +910,20 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
Handler: _HeadscaleService_MoveMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetMachineRoute",
|
||||
Handler: _HeadscaleService_GetMachineRoute_Handler,
|
||||
MethodName: "GetRoutes",
|
||||
Handler: _HeadscaleService_GetRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "EnableMachineRoutes",
|
||||
Handler: _HeadscaleService_EnableMachineRoutes_Handler,
|
||||
MethodName: "EnableRoute",
|
||||
Handler: _HeadscaleService_EnableRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DisableRoute",
|
||||
Handler: _HeadscaleService_DisableRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetMachineRoutes",
|
||||
Handler: _HeadscaleService_GetMachineRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateApiKey",
|
||||
|
||||
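On the client side, the same renamed RPCs are reached through the generated `HeadscaleServiceClient`. This sketch assumes a locally reachable, unauthenticated gRPC endpoint and an existing route with ID 1; a real deployment would use TLS and an API key, and the `RouteId` field name is inferred from the `/api/v1/routes/{route_id}/enable` path rather than stated in this diff.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Address and credentials are placeholders for this sketch.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// List all routes known to the server, then enable one by its ID.
	routes, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println(routes)

	if _, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{RouteId: 1}); err != nil {
		panic(err)
	}
}
```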
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/machine.proto
|
||||
|
||||
@@ -95,6 +95,7 @@ type Machine struct {
|
||||
InvalidTags []string `protobuf:"bytes,19,rep,name=invalid_tags,json=invalidTags,proto3" json:"invalid_tags,omitempty"`
|
||||
ValidTags []string `protobuf:"bytes,20,rep,name=valid_tags,json=validTags,proto3" json:"valid_tags,omitempty"`
|
||||
GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
|
||||
Online bool `protobuf:"varint,22,opt,name=online,proto3" json:"online,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Machine) Reset() {
|
||||
@@ -248,6 +249,13 @@ func (x *Machine) GetGivenName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Machine) GetOnline() bool {
|
||||
if x != nil {
|
||||
return x.Online
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type RegisterMachineRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
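The new `Online` field comes with a generated `GetOnline()` accessor, so callers can read a machine's connectivity state through the usual nil-safe getters. A small sketch with hand-constructed data (the import path is the same assumption as above; real values would come from a `ListMachinesResponse`):

```go
package main

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

// printOnline uses the generated accessors; in practice the slice would come
// from a ListMachinesResponse returned by the gRPC API.
func printOnline(machines []*v1.Machine) {
	for _, m := range machines {
		fmt.Printf("%s online=%v\n", m.GetGivenName(), m.GetOnline())
	}
}

func main() {
	printOnline([]*v1.Machine{{GivenName: "node-1", Online: true}})
}
```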
@@ -1152,7 +1160,7 @@ var file_headscale_v1_machine_proto_rawDesc = []byte{
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b,
|
||||
0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x05, 0x0a, 0x07, 0x4d, 0x61, 0x63,
|
||||
0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfd, 0x05, 0x0a, 0x07, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f,
|
||||
0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
@@ -1198,98 +1206,99 @@ var file_headscale_v1_machine_proto_rawDesc = []byte{
|
||||
0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x67,
|
||||
0x69, 0x76, 0x65, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x09, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x12,
|
||||
0x22, 0x48, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4a, 0x0a, 0x17, 0x52, 0x65,
|
||||
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x32, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d,
|
||||
0x09, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x6e,
|
||||
0x6c, 0x69, 0x6e, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6f, 0x6e, 0x6c, 0x69,
|
||||
0x6e, 0x65, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x12, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69,
|
||||
0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
|
||||
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
|
||||
0x65, 0x79, 0x22, 0x4a, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a,
|
||||
0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x32,
|
||||
0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x49, 0x64, 0x22, 0x45, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x43, 0x0a, 0x0e, 0x53, 0x65, 0x74,
|
||||
0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x12, 0x47, 0x65,
|
||||
0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x22, 0x43, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0x42, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49,
|
||||
0x64, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49,
|
||||
0x64, 0x22, 0x48, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x50, 0x0a, 0x14, 0x52,
|
||||
0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x48, 0x0a,
|
||||
0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x33, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x49, 0x0a, 0x14,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x08, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x12, 0x4d, 0x6f, 0x76, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x4d, 0x6f,
|
||||
0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61,
|
||||
0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0x42,
|
||||
0x0a, 0x0f, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x22, 0x77, 0x0a, 0x19, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a,
|
||||
0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20,
|
||||
0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x4d, 0x0a, 0x1a, 0x44,
|
||||
0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x6e, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x15, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x22, 0x50, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65,
|
||||
0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65,
|
||||
0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x48, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f,
|
||||
0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22,
|
||||
0x33, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x22, 0x49, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x08, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x22,
|
||||
0x51, 0x0a, 0x12, 0x4d, 0x6f, 0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x4d, 0x6f, 0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a,
|
||||
0x1b, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44,
|
||||
0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c,
|
||||
0x0a, 0x18, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
|
||||
0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13,
|
||||
0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f,
|
||||
0x43, 0x4c, 0x49, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45,
|
||||
0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42,
|
||||
0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75,
|
||||
0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0x77, 0x0a, 0x19, 0x44, 0x65,
|
||||
0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x22, 0x4d, 0x0a, 0x1a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d,
|
||||
0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45,
|
||||
0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
|
||||
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54,
|
||||
0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b,
|
||||
0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52,
|
||||
0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x10, 0x02, 0x12, 0x18, 0x0a,
|
||||
0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44,
|
||||
0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f,
|
||||
0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/namespace.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -34,6 +34,7 @@ type PreAuthKey struct {
	Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
	Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
	CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
	AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
}

func (x *PreAuthKey) Reset() {
@@ -124,6 +125,13 @@ func (x *PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {
	return nil
}

func (x *PreAuthKey) GetAclTags() []string {
	if x != nil {
		return x.AclTags
	}
	return nil
}

type CreatePreAuthKeyRequest struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
@@ -133,6 +141,7 @@ type CreatePreAuthKeyRequest struct {
	Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
	Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
	Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
	AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
}

func (x *CreatePreAuthKeyRequest) Reset() {
@@ -195,6 +204,13 @@ func (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {
	return nil
}

func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
	if x != nil {
		return x.AclTags
	}
	return nil
}

type CreatePreAuthKeyResponse struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
@@ -436,7 +452,7 @@ var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac,
|
||||
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
|
||||
@@ -454,42 +470,45 @@ var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x41, 0x74, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65,
|
||||
0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68,
|
||||
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a,
|
||||
0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a,
|
||||
0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, 0x17, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
|
||||
0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x09,
|
||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x67, 0x73, 0x22, 0xc8, 0x01,
|
||||
0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
|
||||
0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61,
|
||||
0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a,
|
||||
0x08, 0x61, 0x63, 0x6c, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x07, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x67, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x22, 0x49, 0x0a, 0x17, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75,
|
||||
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
|
||||
0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72,
|
||||
0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f,
|
||||
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}

var (

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.1
// protoc (unknown)
// source: headscale/v1/routes.proto

@@ -9,6 +9,7 @@ package v1
import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
	reflect "reflect"
	sync "sync"
)
@@ -20,17 +21,24 @@ const (
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type Routes struct {
type Route struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	AdvertisedRoutes []string `protobuf:"bytes,1,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
	EnabledRoutes []string `protobuf:"bytes,2,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
	Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
	Machine *Machine `protobuf:"bytes,2,opt,name=machine,proto3" json:"machine,omitempty"`
	Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
	Advertised bool `protobuf:"varint,4,opt,name=advertised,proto3" json:"advertised,omitempty"`
	Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"`
	IsPrimary bool `protobuf:"varint,6,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"`
	CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
	UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
	DeletedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"`
}

func (x *Routes) Reset() {
	*x = Routes{}
func (x *Route) Reset() {
	*x = Route{}
	if protoimpl.UnsafeEnabled {
		mi := &file_headscale_v1_routes_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -38,13 +46,13 @@ func (x *Routes) Reset() {
	}
}

func (x *Routes) String() string {
func (x *Route) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Routes) ProtoMessage() {}
func (*Route) ProtoMessage() {}

func (x *Routes) ProtoReflect() protoreflect.Message {
func (x *Route) ProtoReflect() protoreflect.Message {
	mi := &file_headscale_v1_routes_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -56,35 +64,82 @@ func (x *Routes) ProtoReflect() protoreflect.Message {
	return mi.MessageOf(x)
}

// Deprecated: Use Routes.ProtoReflect.Descriptor instead.
func (*Routes) Descriptor() ([]byte, []int) {
// Deprecated: Use Route.ProtoReflect.Descriptor instead.
func (*Route) Descriptor() ([]byte, []int) {
	return file_headscale_v1_routes_proto_rawDescGZIP(), []int{0}
}

func (x *Routes) GetAdvertisedRoutes() []string {
func (x *Route) GetId() uint64 {
	if x != nil {
		return x.AdvertisedRoutes
		return x.Id
	}
	return 0
}

func (x *Route) GetMachine() *Machine {
	if x != nil {
		return x.Machine
	}
	return nil
}

func (x *Routes) GetEnabledRoutes() []string {
func (x *Route) GetPrefix() string {
	if x != nil {
		return x.EnabledRoutes
		return x.Prefix
	}
	return ""
}

func (x *Route) GetAdvertised() bool {
	if x != nil {
		return x.Advertised
	}
	return false
}

func (x *Route) GetEnabled() bool {
	if x != nil {
		return x.Enabled
	}
	return false
}

func (x *Route) GetIsPrimary() bool {
	if x != nil {
		return x.IsPrimary
	}
	return false
}

func (x *Route) GetCreatedAt() *timestamppb.Timestamp {
	if x != nil {
		return x.CreatedAt
	}
	return nil
}

type GetMachineRouteRequest struct {
|
||||
func (x *Route) GetUpdatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.UpdatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetDeletedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.DeletedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) Reset() {
|
||||
*x = GetMachineRouteRequest{}
|
||||
func (x *GetRoutesRequest) Reset() {
|
||||
*x = GetRoutesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -92,13 +147,13 @@ func (x *GetMachineRouteRequest) Reset() {
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) String() string {
|
||||
func (x *GetRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRouteRequest) ProtoMessage() {}
|
||||
func (*GetRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
func (x *GetRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -110,28 +165,21 @@ func (x *GetMachineRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRouteRequest) Descriptor() ([]byte, []int) {
|
||||
// Deprecated: Use GetRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetMachineRouteResponse struct {
|
||||
type GetRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes *Routes `protobuf:"bytes,1,opt,name=routes,proto3" json:"routes,omitempty"`
|
||||
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) Reset() {
|
||||
*x = GetMachineRouteResponse{}
|
||||
func (x *GetRoutesResponse) Reset() {
|
||||
*x = GetRoutesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -139,13 +187,13 @@ func (x *GetMachineRouteResponse) Reset() {
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) String() string {
|
||||
func (x *GetRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRouteResponse) ProtoMessage() {}
|
||||
func (*GetRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
func (x *GetRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -157,29 +205,28 @@ func (x *GetMachineRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRouteResponse) Descriptor() ([]byte, []int) {
|
||||
// Deprecated: Use GetRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) GetRoutes() *Routes {
|
||||
func (x *GetRoutesResponse) GetRoutes() []*Route {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableMachineRoutesRequest struct {
|
||||
type EnableRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) Reset() {
|
||||
*x = EnableMachineRoutesRequest{}
|
||||
func (x *EnableRouteRequest) Reset() {
|
||||
*x = EnableRouteRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -187,13 +234,13 @@ func (x *EnableMachineRoutesRequest) Reset() {
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) String() string {
|
||||
func (x *EnableRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableMachineRoutesRequest) ProtoMessage() {}
|
||||
func (*EnableRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
func (x *EnableRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -205,35 +252,26 @@ func (x *EnableMachineRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableMachineRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EnableMachineRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
// Deprecated: Use EnableRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EnableRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) GetMachineId() uint64 {
|
||||
func (x *EnableRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) GetRoutes() []string {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableMachineRoutesResponse struct {
|
||||
type EnableRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes *Routes `protobuf:"bytes,1,opt,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) Reset() {
|
||||
*x = EnableMachineRoutesResponse{}
|
||||
func (x *EnableRouteResponse) Reset() {
|
||||
*x = EnableRouteResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -241,13 +279,13 @@ func (x *EnableMachineRoutesResponse) Reset() {
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) String() string {
|
||||
func (x *EnableRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableMachineRoutesResponse) ProtoMessage() {}
|
||||
func (*EnableRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
func (x *EnableRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
@@ -259,12 +297,184 @@ func (x *EnableMachineRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableMachineRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EnableMachineRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
// Deprecated: Use EnableRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EnableRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) GetRoutes() *Routes {
|
||||
type DisableRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) Reset() {
|
||||
*x = DisableRouteRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DisableRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DisableRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DisableRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DisableRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DisableRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DisableRouteResponse) Reset() {
|
||||
*x = DisableRouteResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DisableRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DisableRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DisableRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DisableRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DisableRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
type GetMachineRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRoutesRequest) Reset() {
|
||||
*x = GetMachineRoutesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *GetMachineRoutesRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetMachineRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRoutesResponse) Reset() {
|
||||
*x = GetMachineRoutesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *GetMachineRoutesResponse) GetRoutes() []*Route {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
@@ -276,34 +486,60 @@ var File_headscale_v1_routes_proto protoreflect.FileDescriptor
|
||||
var file_headscale_v1_routes_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x5c, 0x0a, 0x06, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65,
|
||||
0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
|
||||
0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
|
||||
0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64,
|
||||
0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x1a, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x4b,
|
||||
0x0a, 0x1b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a,
|
||||
0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f,
|
||||
0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e,
|
||||
0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xea, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64,
|
||||
0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x64, 0x76,
|
||||
0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61,
|
||||
0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
|
||||
0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61,
|
||||
0x72, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
|
||||
0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a,
|
||||
0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75,
|
||||
0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x64, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x64, 0x41, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x12, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x19, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x45, 0x6e,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x30, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x17, 0x47,
|
||||
0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x49, 0x64, 0x22, 0x47, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x29,
|
||||
0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61,
|
||||
0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f,
|
||||
0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}

var (
@@ -318,22 +554,32 @@ func file_headscale_v1_routes_proto_rawDescGZIP() []byte {
|
||||
return file_headscale_v1_routes_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_headscale_v1_routes_proto_goTypes = []interface{}{
|
||||
(*Routes)(nil), // 0: headscale.v1.Routes
|
||||
(*GetMachineRouteRequest)(nil), // 1: headscale.v1.GetMachineRouteRequest
|
||||
(*GetMachineRouteResponse)(nil), // 2: headscale.v1.GetMachineRouteResponse
|
||||
(*EnableMachineRoutesRequest)(nil), // 3: headscale.v1.EnableMachineRoutesRequest
|
||||
(*EnableMachineRoutesResponse)(nil), // 4: headscale.v1.EnableMachineRoutesResponse
|
||||
(*Route)(nil), // 0: headscale.v1.Route
|
||||
(*GetRoutesRequest)(nil), // 1: headscale.v1.GetRoutesRequest
|
||||
(*GetRoutesResponse)(nil), // 2: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteRequest)(nil), // 3: headscale.v1.EnableRouteRequest
|
||||
(*EnableRouteResponse)(nil), // 4: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteRequest)(nil), // 5: headscale.v1.DisableRouteRequest
|
||||
(*DisableRouteResponse)(nil), // 6: headscale.v1.DisableRouteResponse
|
||||
(*GetMachineRoutesRequest)(nil), // 7: headscale.v1.GetMachineRoutesRequest
|
||||
(*GetMachineRoutesResponse)(nil), // 8: headscale.v1.GetMachineRoutesResponse
|
||||
(*Machine)(nil), // 9: headscale.v1.Machine
|
||||
(*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_routes_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.GetMachineRouteResponse.routes:type_name -> headscale.v1.Routes
|
||||
0, // 1: headscale.v1.EnableMachineRoutesResponse.routes:type_name -> headscale.v1.Routes
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
9, // 0: headscale.v1.Route.machine:type_name -> headscale.v1.Machine
|
||||
10, // 1: headscale.v1.Route.created_at:type_name -> google.protobuf.Timestamp
|
||||
10, // 2: headscale.v1.Route.updated_at:type_name -> google.protobuf.Timestamp
|
||||
10, // 3: headscale.v1.Route.deleted_at:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.GetRoutesResponse.routes:type_name -> headscale.v1.Route
|
||||
0, // 5: headscale.v1.GetMachineRoutesResponse.routes:type_name -> headscale.v1.Route
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
}
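The goTypes and depIdxs tables above wire the regenerated Route messages and their cross-references (Route.machine -> Machine, the timestamp fields -> google.protobuf.Timestamp) into the proto runtime. Below is a minimal, hypothetical sketch of building one of the regenerated messages; it assumes the usual protoc-gen-go field names (Id, Prefix, Machine, CreatedAt, ...) on headscale.v1.Route and a Name field on headscale.v1.Machine, none of which are shown in this hunk.

package main

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Route now carries its own identity and references Machine and
	// google.protobuf.Timestamp, the cross-references listed in depIdxs above.
	route := &v1.Route{
		Id:         1,
		Prefix:     "10.0.0.0/24",
		Advertised: true,
		Enabled:    false,
		IsPrimary:  false,
		Machine:    &v1.Machine{Name: "router-1"},
		CreatedAt:  timestamppb.Now(),
	}

	out, err := protojson.Marshal(route)
	if err != nil {
		panic(err)
	}
	// The JSON shape matches the v1Route definition in the swagger change below.
	fmt.Println(string(out))
}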
|
||||
|
||||
func init() { file_headscale_v1_routes_proto_init() }
|
||||
@@ -341,9 +587,10 @@ func file_headscale_v1_routes_proto_init() {
|
||||
if File_headscale_v1_routes_proto != nil {
|
||||
return
|
||||
}
|
||||
file_headscale_v1_machine_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_routes_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Routes); i {
|
||||
switch v := v.(*Route); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -355,7 +602,7 @@ func file_headscale_v1_routes_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRouteRequest); i {
|
||||
switch v := v.(*GetRoutesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -367,7 +614,7 @@ func file_headscale_v1_routes_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRouteResponse); i {
|
||||
switch v := v.(*GetRoutesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -379,7 +626,7 @@ func file_headscale_v1_routes_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnableMachineRoutesRequest); i {
|
||||
switch v := v.(*EnableRouteRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -391,7 +638,55 @@ func file_headscale_v1_routes_proto_init() {
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnableMachineRoutesResponse); i {
|
||||
switch v := v.(*EnableRouteResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DisableRouteRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DisableRouteResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRoutesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRoutesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -409,7 +704,7 @@ func file_headscale_v1_routes_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_routes_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -367,13 +367,12 @@
|
||||
},
|
||||
"/api/v1/machine/{machineId}/routes": {
|
||||
"get": {
|
||||
"summary": "--- Route start ---",
|
||||
"operationId": "HeadscaleService_GetMachineRoute",
|
||||
"operationId": "HeadscaleService_GetMachineRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetMachineRouteResponse"
|
||||
"$ref": "#/definitions/v1GetMachineRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
@@ -395,45 +394,6 @@
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_EnableMachineRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EnableMachineRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "routes",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/tags": {
|
||||
@@ -722,6 +682,91 @@
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes": {
|
||||
"get": {
|
||||
"summary": "--- Route start ---",
|
||||
"operationId": "HeadscaleService_GetRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}/disable": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_DisableRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DisableRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}/enable": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_EnableRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EnableRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
@@ -824,6 +869,12 @@
|
||||
"expiration": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"aclTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -869,13 +920,11 @@
|
||||
"v1DeleteNamespaceResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1EnableMachineRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"$ref": "#/definitions/v1Routes"
|
||||
}
|
||||
}
|
||||
"v1DisableRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1EnableRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1ExpireApiKeyRequest": {
|
||||
"type": "object",
|
||||
@@ -918,11 +967,14 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetMachineRouteResponse": {
|
||||
"v1GetMachineRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"$ref": "#/definitions/v1Routes"
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1Route"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -934,6 +986,17 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1Route"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ListApiKeysResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1048,6 +1111,9 @@
|
||||
},
|
||||
"givenName": {
|
||||
"type": "string"
|
||||
},
|
||||
"online": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1102,6 +1168,12 @@
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"aclTags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1139,20 +1211,39 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1Routes": {
|
||||
"v1Route": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"advertisedRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"enabledRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
},
|
||||
"prefix": {
|
||||
"type": "string"
|
||||
},
|
||||
"advertised": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"isPrimary": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"updatedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"deletedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
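Taken together, the swagger changes above replace the per-machine route toggles with route-scoped endpoints. A hedged sketch of driving them over plain HTTP follows; the listen address, the API key, and the Bearer authorization header are assumptions about a local deployment, and route ID 1 is only a placeholder.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://127.0.0.1:8080" // assumed headscale address
	apiKey := "REPLACE_ME"          // assumed API key

	call := func(method, path string) {
		req, err := http.NewRequest(method, base+path, nil)
		if err != nil {
			panic(err)
		}
		req.Header.Set("Authorization", "Bearer "+apiKey) // assumed auth scheme
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		body, _ := io.ReadAll(resp.Body)
		fmt.Println(method, path, resp.Status, string(body))
	}

	call(http.MethodGet, "/api/v1/routes")            // v1GetRoutesResponse with a list of v1Route
	call(http.MethodPost, "/api/v1/routes/1/enable")  // routeId is a uint64 path parameter
	call(http.MethodPost, "/api/v1/routes/1/disable") // v1DisableRouteResponse is an empty object
}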
|
||||
|
||||
163
go.mod
@@ -1,142 +1,151 @@
|
||||
module github.com/juanfont/headscale
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.5
|
||||
github.com/AlecAivazis/survey/v2 v2.3.6
|
||||
github.com/ccding/go-stun/stun v0.0.0-20200514191101-4dc67bcdb029
|
||||
github.com/coreos/go-oidc/v3 v3.2.0
|
||||
github.com/cenkalti/backoff/v4 v4.2.0
|
||||
github.com/coreos/go-oidc/v3 v3.4.0
|
||||
github.com/deckarep/golang-set/v2 v2.1.0
|
||||
github.com/efekarakus/termcolor v1.0.1
|
||||
github.com/glebarez/sqlite v1.4.6
|
||||
github.com/gofrs/uuid v4.2.0+incompatible
|
||||
github.com/glebarez/sqlite v1.5.0
|
||||
github.com/gofrs/uuid v4.3.1+incompatible
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.2
|
||||
github.com/klauspost/compress v1.15.9
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0
|
||||
github.com/klauspost/compress v1.15.12
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
|
||||
github.com/ory/dockertest/v3 v3.9.1
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/prometheus/common v0.37.0
|
||||
github.com/pterm/pterm v0.12.45
|
||||
github.com/puzpuzpuz/xsync v1.4.2
|
||||
github.com/rs/zerolog v1.27.0
|
||||
github.com/spf13/cobra v1.5.0
|
||||
github.com/spf13/viper v1.12.0
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/pterm/pterm v0.12.50
|
||||
github.com/puzpuzpuz/xsync/v2 v2.4.0
|
||||
github.com/rs/zerolog v1.28.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/viper v1.14.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/tailscale/hujson v0.0.0-20220630195928-54599719472f
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
|
||||
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
|
||||
google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276
|
||||
google.golang.org/grpc v1.48.0
|
||||
go4.org/netipx v0.0.0-20220925034521-797b0c90d8ab
|
||||
golang.org/x/crypto v0.3.0
|
||||
golang.org/x/net v0.2.0
|
||||
golang.org/x/oauth2 v0.2.0
|
||||
golang.org/x/sync v0.1.0
|
||||
google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd
|
||||
google.golang.org/grpc v1.51.0
|
||||
google.golang.org/protobuf v1.28.1
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.3.8
|
||||
gorm.io/gorm v1.23.8
|
||||
inet.af/netaddr v0.0.0-20220617031823-097006376321
|
||||
tailscale.com v1.28.0
|
||||
gorm.io/driver/postgres v1.4.5
|
||||
gorm.io/gorm v1.24.2
|
||||
tailscale.com v1.34.0
|
||||
)
|
||||
|
||||
require (
|
||||
atomicgo.dev/cursor v0.1.1 // indirect
|
||||
atomicgo.dev/keyboard v0.2.8 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
filippo.io/edwards25519 v1.0.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/console v1.0.3 // indirect
|
||||
github.com/containerd/continuity v0.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v20.10.16+incompatible // indirect
|
||||
github.com/docker/docker v20.10.16+incompatible // indirect
|
||||
github.com/docker/cli v20.10.21+incompatible // indirect
|
||||
github.com/docker/docker v20.10.21+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/glebarez/go-sqlite v1.17.3 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.19.5 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gookit/color v1.5.0 // indirect
|
||||
github.com/hashicorp/go-version v1.4.0 // indirect
|
||||
github.com/gookit/color v1.5.2 // indirect
|
||||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.12.1 // indirect
|
||||
github.com/jackc/pgconn v1.13.0 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.1 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||
github.com/jackc/pgtype v1.11.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.16.1 // indirect
|
||||
github.com/jackc/pgtype v1.13.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.17.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.4 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/josharian/native v1.0.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.3.0 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.5 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mdlayher/netlink v1.6.0 // indirect
|
||||
github.com/mdlayher/socket v0.2.3 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mdlayher/netlink v1.7.0 // indirect
|
||||
github.com/mdlayher/socket v0.4.0 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
|
||||
github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
|
||||
github.com/opencontainers/runc v1.1.2 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
|
||||
github.com/rivo/uniseg v0.4.3 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1-0.20211023094830-115ce09fd6b4 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/subosito/gotenv v1.3.0 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/subosito/gotenv v1.4.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
|
||||
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
|
||||
go4.org/mem v0.0.0-20210711025021-927187094b94 // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d // indirect
|
||||
golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.4.10 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
|
||||
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 // indirect
|
||||
golang.org/x/mod v0.7.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.2.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.3.0 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
|
||||
modernc.org/libc v1.16.8 // indirect
|
||||
modernc.org/mathutil v1.4.1 // indirect
|
||||
modernc.org/memory v1.1.1 // indirect
|
||||
modernc.org/sqlite v1.17.3 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
modernc.org/libc v1.21.5 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/memory v1.5.0 // indirect
|
||||
modernc.org/sqlite v1.20.0 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
)
|
||||
|
||||
71
grpcv1.go
@@ -1,4 +1,4 @@
|
||||
//nolint
|
||||
// nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
type headscaleV1APIServer struct { // v1.HeadscaleServiceServer
|
||||
@@ -106,11 +107,21 @@ func (api headscaleV1APIServer) CreatePreAuthKey(
|
||||
expiration = request.GetExpiration().AsTime()
|
||||
}
|
||||
|
||||
for _, tag := range request.AclTags {
|
||||
err := validateTag(tag)
|
||||
if err != nil {
|
||||
return &v1.CreatePreAuthKeyResponse{
|
||||
PreAuthKey: nil,
|
||||
}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
preAuthKey, err := api.h.CreatePreAuthKey(
|
||||
request.GetNamespace(),
|
||||
request.GetReusable(),
|
||||
request.GetEphemeral(),
|
||||
&expiration,
|
||||
request.AclTags,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -353,36 +364,60 @@ func (api headscaleV1APIServer) MoveMachine(
|
||||
return &v1.MoveMachineResponse{Machine: machine.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) GetMachineRoute(
|
||||
func (api headscaleV1APIServer) GetRoutes(
|
||||
ctx context.Context,
|
||||
request *v1.GetMachineRouteRequest,
|
||||
) (*v1.GetMachineRouteResponse, error) {
|
||||
machine, err := api.h.GetMachineByID(request.GetMachineId())
|
||||
request *v1.GetRoutesRequest,
|
||||
) (*v1.GetRoutesResponse, error) {
|
||||
routes, err := api.h.GetRoutes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.GetMachineRouteResponse{
|
||||
Routes: machine.RoutesToProto(),
|
||||
return &v1.GetRoutesResponse{
|
||||
Routes: Routes(routes).toProto(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) EnableMachineRoutes(
|
||||
func (api headscaleV1APIServer) EnableRoute(
|
||||
ctx context.Context,
|
||||
request *v1.EnableMachineRoutesRequest,
|
||||
) (*v1.EnableMachineRoutesResponse, error) {
|
||||
request *v1.EnableRouteRequest,
|
||||
) (*v1.EnableRouteResponse, error) {
|
||||
err := api.h.EnableRoute(request.GetRouteId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.EnableRouteResponse{}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) DisableRoute(
|
||||
ctx context.Context,
|
||||
request *v1.DisableRouteRequest,
|
||||
) (*v1.DisableRouteResponse, error) {
|
||||
err := api.h.DisableRoute(request.GetRouteId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.DisableRouteResponse{}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) GetMachineRoutes(
|
||||
ctx context.Context,
|
||||
request *v1.GetMachineRoutesRequest,
|
||||
) (*v1.GetMachineRoutesResponse, error) {
|
||||
machine, err := api.h.GetMachineByID(request.GetMachineId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = api.h.EnableRoutes(machine, request.GetRoutes()...)
|
||||
routes, err := api.h.GetMachineRoutes(machine)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.EnableMachineRoutesResponse{
|
||||
Routes: machine.RoutesToProto(),
|
||||
return &v1.GetMachineRoutesResponse{
|
||||
Routes: Routes(routes).toProto(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -469,7 +504,7 @@ func (api headscaleV1APIServer) DebugCreateMachine(
|
||||
Hostname: "DebugTestMachine",
|
||||
}
|
||||
|
||||
givenName, err := api.h.GenerateGivenName(request.GetName())
|
||||
givenName, err := api.h.GenerateGivenName(request.GetKey(), request.GetName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -487,8 +522,14 @@ func (api headscaleV1APIServer) DebugCreateMachine(
|
||||
HostInfo: HostInfo(hostinfo),
|
||||
}
|
||||
|
||||
nodeKey := key.NodePublic{}
|
||||
err = nodeKey.UnmarshalText([]byte(request.GetKey()))
|
||||
if err != nil {
|
||||
log.Panic().Msg("can not add machine for debug. invalid node key")
|
||||
}
|
||||
|
||||
api.h.registrationCache.Set(
|
||||
request.GetKey(),
|
||||
NodePublicKeyStripPrefix(nodeKey),
|
||||
newMachine,
|
||||
registerCacheExpiration,
|
||||
)
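A minimal client-side sketch of the new route RPCs implemented above; the v1.NewHeadscaleServiceClient constructor name, the local gRPC address and port, the use of insecure transport credentials, and route ID 1 are all assumptions for illustration rather than details taken from this diff.

package main

import (
	"context"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)
	ctx := context.Background()

	// List every route known to the server.
	routes, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, route := range routes.GetRoutes() {
		log.Printf("route %d: %s (enabled=%v)", route.GetId(), route.GetPrefix(), route.GetEnabled())
	}

	// Enable and then disable a single route by its ID.
	if _, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{RouteId: 1}); err != nil {
		log.Fatal(err)
	}
	if _, err := client.DisableRoute(ctx, &v1.DisableRouteRequest{RouteId: 1}); err != nil {
		log.Fatal(err)
	}
}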
|
||||
|
||||
15
handler_legacy.go
Normal file
@@ -0,0 +1,15 @@
|
||||
//go:build ts2019
|
||||
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
|
||||
router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
|
||||
Methods(http.MethodPost)
|
||||
router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
|
||||
}
|
||||
8
handler_placeholder.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build !ts2019
|
||||
|
||||
package headscale
|
||||
|
||||
import "github.com/gorilla/mux"
|
||||
|
||||
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
|
||||
}
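These two files form a build-tag switch: compiling with the ts2019 tag (for example, go build -tags ts2019 ./...) wires the legacy /machine/{mkey} registration and map handlers into the router, while a default build gets the empty placeholder and omits the legacy endpoints entirely.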
|
||||
302
integration/auth_oidc_test.go
Normal file
@@ -0,0 +1,302 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
const (
|
||||
dockerContextPath = "../."
|
||||
hsicOIDCMockHashLength = 6
|
||||
oidcServerPort = 10000
|
||||
)
|
||||
|
||||
var errStatusCodeNotOK = errors.New("status code not OK")
|
||||
|
||||
type AuthOIDCScenario struct {
|
||||
*Scenario
|
||||
|
||||
mockOIDC *dockertest.Resource
|
||||
}
|
||||
|
||||
func TestOIDCAuthenticationPingAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
baseScenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
scenario := AuthOIDCScenario{
|
||||
Scenario: baseScenario,
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
oidcConfig, err := scenario.runMockOIDC()
|
||||
if err != nil {
|
||||
t.Errorf("failed to run mock OIDC server: %s", err)
|
||||
}
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
|
||||
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret,
|
||||
"HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(
|
||||
spec,
|
||||
hsic.WithTestName("oidcauthping"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithHostnameAsServerURL(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) CreateHeadscaleEnv(
|
||||
namespaces map[string]int,
|
||||
opts ...hsic.Option,
|
||||
) error {
|
||||
headscale, err := s.Headscale(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for namespaceName, clientCount := range namespaces {
|
||||
log.Printf("creating namespace %s with %d clients", namespaceName, clientCount)
|
||||
err = s.CreateNamespace(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.runTailscaleUp(namespaceName, headscale.GetEndpoint())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
|
||||
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)
|
||||
|
||||
hostname := fmt.Sprintf("hs-oidcmock-%s", hash)
|
||||
|
||||
mockOidcOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Cmd: []string{"headscale", "mockoidc"},
|
||||
ExposedPorts: []string{"10000/tcp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"10000/tcp": {{HostPort: "10000"}},
|
||||
},
|
||||
Networks: []*dockertest.Network{s.Scenario.network},
|
||||
Env: []string{
|
||||
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
|
||||
"MOCKOIDC_PORT=10000",
|
||||
"MOCKOIDC_CLIENT_ID=superclient",
|
||||
"MOCKOIDC_CLIENT_SECRET=supersecret",
|
||||
},
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
ContextDir: dockerContextPath,
|
||||
}
|
||||
|
||||
err := s.pool.RemoveContainerByName(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
mockOidcOptions,
|
||||
dockertestutil.DockerRestartPolicy); err == nil {
|
||||
s.mockOIDC = pmockoidc
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Println("Waiting for headscale mock oidc to be ready for tests")
|
||||
hostEndpoint := fmt.Sprintf(
|
||||
"%s:%s",
|
||||
s.mockOIDC.GetIPInNetwork(s.network),
|
||||
s.mockOIDC.GetPort(fmt.Sprintf("%d/tcp", oidcServerPort)),
|
||||
)
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
|
||||
httpClient := &http.Client{}
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, oidcConfigURL, nil)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
log.Printf("headscale mock OIDC tests is not ready: %s\n", err)
|
||||
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return errStatusCodeNotOK
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)
|
||||
|
||||
return &headscale.OIDCConfig{
|
||||
Issuer: fmt.Sprintf("http://%s/oidc",
|
||||
net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(oidcServerPort))),
|
||||
ClientID: "superclient",
|
||||
ClientSecret: "supersecret",
|
||||
StripEmaildomain: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) runTailscaleUp(
|
||||
namespaceStr, loginServer string,
|
||||
) error {
|
||||
headscale, err := s.Headscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("running tailscale up for namespace %s", namespaceStr)
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.joinWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.joinWaitGroup.Done()
|
||||
|
||||
// TODO(juanfont): error handle this
|
||||
loginURL, err := c.UpWithLoginURL(loginServer)
|
||||
if err != nil {
|
||||
log.Printf("failed to run tailscale up: %s", err)
|
||||
}
|
||||
|
||||
loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
|
||||
loginURL.Scheme = "http"
|
||||
|
||||
insecureTransport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint
|
||||
}
|
||||
|
||||
log.Printf("%s login url: %s\n", c.Hostname(), loginURL.String())
|
||||
|
||||
httpClient := &http.Client{Transport: insecureTransport}
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
log.Printf("%s failed to get login url %s: %s", c.Hostname(), loginURL, err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
_, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Printf("%s failed to read response body: %s", c.Hostname(), err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("Finished request for %s to join tailnet", c.Hostname())
|
||||
}(client)
|
||||
|
||||
err = client.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
log.Printf("client %s is ready", client.Hostname())
|
||||
}
|
||||
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) Shutdown() error {
|
||||
err := s.pool.Purge(s.mockOIDC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.Scenario.Shutdown()
|
||||
}
|
||||
366
integration/auth_web_flow_test.go
Normal file
@@ -0,0 +1,366 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
)
|
||||
|
||||
var errParseAuthPage = errors.New("failed to parse auth page")
|
||||
|
||||
type AuthWebFlowScenario struct {
|
||||
*Scenario
|
||||
}
|
||||
|
||||
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
baseScenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
scenario := AuthWebFlowScenario{
|
||||
Scenario: baseScenario,
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("webauthping"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
baseScenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
scenario := AuthWebFlowScenario{
|
||||
Scenario: baseScenario,
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("weblogout"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
clientIPs := make(map[TailscaleClient][]netip.Addr)
|
||||
for _, client := range allClients {
|
||||
ips, err := client.IPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
clientIPs[client] = ips
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
err := client.Logout()
|
||||
if err != nil {
|
||||
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
|
||||
scenario.waitForTailscaleLogout()
|
||||
|
||||
t.Logf("all clients logged out")
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get headscale server: %s", err)
|
||||
}
|
||||
|
||||
for namespaceName := range spec {
|
||||
err = scenario.runTailscaleUp(namespaceName, headscale.GetEndpoint())
|
||||
if err != nil {
|
||||
t.Errorf("failed to run tailscale up: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("all clients logged in again")
|
||||
|
||||
allClients, err = scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err = scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
success = 0
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
for _, client := range allClients {
|
||||
ips, err := client.IPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
// lets check if the IPs are the same
|
||||
if len(ips) != len(clientIPs[client]) {
|
||||
t.Errorf("IPs changed for client %s", client.Hostname())
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
found := false
|
||||
for _, oldIP := range clientIPs[client] {
|
||||
if ip == oldIP {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
t.Errorf("IPs changed for client %s. Used to be %v now %v", client.Hostname(), clientIPs[client], ips)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("all clients IPs are the same")
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
|
||||
namespaces map[string]int,
|
||||
opts ...hsic.Option,
|
||||
) error {
|
||||
headscale, err := s.Headscale(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for namespaceName, clientCount := range namespaces {
|
||||
log.Printf("creating namespace %s with %d clients", namespaceName, clientCount)
|
||||
err = s.CreateNamespace(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.runTailscaleUp(namespaceName, headscale.GetEndpoint())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) waitForTailscaleLogout() {
|
||||
for _, namespace := range s.namespaces {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.syncWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.syncWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
_ = c.WaitForLogout()
|
||||
}(client)
|
||||
}
|
||||
namespace.syncWaitGroup.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) runTailscaleUp(
|
||||
namespaceStr, loginServer string,
|
||||
) error {
|
||||
log.Printf("running tailscale up for namespace %s", namespaceStr)
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.joinWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.joinWaitGroup.Done()
|
||||
|
||||
// TODO(juanfont): error handle this
|
||||
loginURL, err := c.UpWithLoginURL(loginServer)
|
||||
if err != nil {
|
||||
log.Printf("failed to run tailscale up: %s", err)
|
||||
}
|
||||
|
||||
err = s.runHeadscaleRegister(namespaceStr, loginURL)
|
||||
if err != nil {
|
||||
log.Printf("failed to register client: %s", err)
|
||||
}
|
||||
}(client)
|
||||
|
||||
err := client.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) runHeadscaleRegister(namespaceStr string, loginURL *url.URL) error {
|
||||
headscale, err := s.Headscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("loginURL: %s", loginURL)
|
||||
loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
|
||||
loginURL.Scheme = "http"
|
||||
|
||||
httpClient := &http.Client{}
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
// see api.go HTML template
|
||||
codeSep := strings.Split(string(body), "</code>")
|
||||
if len(codeSep) != 2 {
|
||||
return errParseAuthPage
|
||||
}
|
||||
|
||||
keySep := strings.Split(codeSep[0], "key ")
|
||||
if len(keySep) != 2 {
|
||||
return errParseAuthPage
|
||||
}
|
||||
key := keySep[1]
|
||||
log.Printf("registering node %s", key)
|
||||
|
||||
if headscale, err := s.Headscale(); err == nil {
|
||||
_, err = headscale.Execute(
|
||||
[]string{"headscale", "-n", namespaceStr, "nodes", "register", "--key", key},
|
||||
)
|
||||
if err != nil {
|
||||
log.Printf("failed to register node: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to find headscale: %w", errNoHeadscaleAvailable)
|
||||
}
|
||||
534
integration/cli_test.go
Normal file
@@ -0,0 +1,534 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error {
|
||||
str, err := headscale.Execute(command)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(str), result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestNamespaceCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": 0,
|
||||
"namespace2": 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listNamespaces []v1.Namespace
|
||||
err = executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listNamespaces,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
result := []string{listNamespaces[0].Name, listNamespaces[1].Name}
|
||||
sort.Strings(result)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{"namespace1", "namespace2"},
|
||||
result,
|
||||
)
|
||||
|
||||
_, err = headscale.Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
"rename",
|
||||
"--output",
|
||||
"json",
|
||||
"namespace2",
|
||||
"newname",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listAfterRenameNamespaces []v1.Namespace
|
||||
err = executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listAfterRenameNamespaces,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
result = []string{listAfterRenameNamespaces[0].Name, listAfterRenameNamespaces[1].Name}
|
||||
sort.Strings(result)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{"namespace1", "newname"},
|
||||
result,
|
||||
)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "preauthkeyspace"
|
||||
count := 3
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
assert.NoError(t, err)
|
||||
|
||||
keys := make([]*v1.PreAuthKey, count)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index := 0; index < count; index++ {
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err := executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--reusable",
|
||||
"--expiration",
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
"--tags",
|
||||
"tag:test1,tag:test2",
|
||||
},
|
||||
&preAuthKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
keys[index] = &preAuthKey
|
||||
}
|
||||
|
||||
assert.Len(t, keys, 3)
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeys,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// There is one key created by "scenario.CreateHeadscaleEnv"
|
||||
assert.Len(t, listedPreAuthKeys, 4)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{keys[0].Id, keys[1].Id, keys[2].Id},
|
||||
[]string{listedPreAuthKeys[1].Id, listedPreAuthKeys[2].Id, listedPreAuthKeys[3].Id},
|
||||
)
|
||||
|
||||
assert.NotEmpty(t, listedPreAuthKeys[1].Key)
|
||||
assert.NotEmpty(t, listedPreAuthKeys[2].Key)
|
||||
assert.NotEmpty(t, listedPreAuthKeys[3].Key)
|
||||
|
||||
assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(t, listedPreAuthKeys[2].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(t, listedPreAuthKeys[3].Expiration.AsTime().After(time.Now()))
|
||||
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[2].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[3].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
|
||||
for index := range listedPreAuthKeys {
|
||||
if index == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Equal(t, listedPreAuthKeys[index].AclTags, []string{"tag:test1", "tag:test2"})
|
||||
}
|
||||
|
||||
// Test key expiry
|
||||
_, err = headscale.Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"expire",
|
||||
listedPreAuthKeys[1].Key,
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listedPreAuthKeysAfterExpire []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeysAfterExpire,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, listedPreAuthKeysAfterExpire[1].Expiration.AsTime().Before(time.Now()))
|
||||
assert.True(t, listedPreAuthKeysAfterExpire[2].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(t, listedPreAuthKeysAfterExpire[3].Expiration.AsTime().After(time.Now()))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "pre-auth-key-without-exp-namespace"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--reusable",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&preAuthKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeys,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// There is one key created by "scenario.CreateHeadscaleEnv"
|
||||
assert.Len(t, listedPreAuthKeys, 2)
|
||||
|
||||
assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
|
||||
assert.True(
|
||||
t,
|
||||
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Minute*70)),
|
||||
)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "pre-auth-key-reus-ephm-namespace"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthReusableKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--reusable=true",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&preAuthReusableKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthEphemeralKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"create",
|
||||
"--ephemeral=true",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&preAuthEphemeralKey,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, preAuthEphemeralKey.GetEphemeral())
|
||||
assert.False(t, preAuthEphemeralKey.GetReusable())
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&listedPreAuthKeys,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// There is one key created by "scenario.CreateHeadscaleEnv"
|
||||
assert.Len(t, listedPreAuthKeys, 3)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestEnablingRoutes(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "enable-routing"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
assert.NoError(t, err)
|
||||
|
||||
spec := map[string]int{
|
||||
namespace: 3,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// advertise routes using the up command
|
||||
for i, client := range allClients {
|
||||
routeStr := fmt.Sprintf("10.0.%d.0/24", i)
|
||||
hostname, _ := client.FQDN()
|
||||
_, _, err = client.Execute([]string{
|
||||
"tailscale",
|
||||
"up",
|
||||
fmt.Sprintf("--advertise-routes=%s", routeStr),
|
||||
"-login-server", headscale.GetEndpoint(),
|
||||
"--hostname", hostname,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
var routes []*v1.Route
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&routes,
|
||||
)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, routes, 3)
|
||||
|
||||
for _, route := range routes {
|
||||
assert.Equal(t, route.Advertised, true)
|
||||
assert.Equal(t, route.Enabled, false)
|
||||
assert.Equal(t, route.IsPrimary, false)
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
_, err = headscale.Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"enable",
|
||||
"--route",
|
||||
strconv.Itoa(int(route.Id)),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
var enablingRoutes []*v1.Route
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&enablingRoutes,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for _, route := range enablingRoutes {
|
||||
assert.Equal(t, route.Advertised, true)
|
||||
assert.Equal(t, route.Enabled, true)
|
||||
assert.Equal(t, route.IsPrimary, true)
|
||||
}
|
||||
|
||||
routeIDToBeDisabled := enablingRoutes[0].Id
|
||||
|
||||
_, err = headscale.Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"disable",
|
||||
"--route",
|
||||
strconv.Itoa(int(routeIDToBeDisabled)),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var disablingRoutes []*v1.Route
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&disablingRoutes,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for _, route := range disablingRoutes {
|
||||
assert.Equal(t, true, route.Advertised)
|
||||
|
||||
if route.Id == routeIDToBeDisabled {
|
||||
assert.Equal(t, route.Enabled, false)
|
||||
assert.Equal(t, route.IsPrimary, false)
|
||||
} else {
|
||||
assert.Equal(t, route.Enabled, true)
|
||||
assert.Equal(t, route.IsPrimary, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
integration/control.go (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
)
|
||||
|
||||
type ControlServer interface {
|
||||
Shutdown() error
|
||||
Execute(command []string) (string, error)
|
||||
GetHealthEndpoint() string
|
||||
GetEndpoint() string
|
||||
WaitForReady() error
|
||||
CreateNamespace(namespace string) error
|
||||
CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
|
||||
ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
|
||||
GetCert() []byte
|
||||
GetHostname() string
|
||||
GetIP() string
|
||||
}
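The interface above is the seam the rest of the test framework programs against. A minimal sketch of a helper that depends only on ControlServer (the helper name is hypothetical; the calls mirror how the scenario code below uses the interface):

// ensureNamespaceWithKey is a hypothetical helper: it only needs the
// ControlServer interface, so it works with any implementation (for example
// the HeadscaleInContainer type introduced further down in this change).
func ensureNamespaceWithKey(control ControlServer, namespace string) (*v1.PreAuthKey, error) {
	if err := control.CreateNamespace(namespace); err != nil {
		return nil, err
	}

	return control.CreateAuthKey(namespace)
}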
|
||||
integration/dockertestutil/config.go (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
func IsRunningInContainer() bool {
|
||||
if _, err := os.Stat("/.dockerenv"); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func DockerRestartPolicy(config *docker.HostConfig) {
|
||||
// AutoRemove=true would make a stopped container go away by itself on error *immediately*;
|
||||
// we set it to false so containers remain until the end of the integration test.
|
||||
config.AutoRemove = false
|
||||
config.RestartPolicy = docker.RestartPolicy{
|
||||
Name: "no",
|
||||
}
|
||||
}
|
||||
|
||||
func DockerAllowLocalIPv6(config *docker.HostConfig) {
|
||||
if config.Sysctls == nil {
|
||||
config.Sysctls = make(map[string]string, 1)
|
||||
}
|
||||
config.Sysctls["net.ipv6.conf.all.disable_ipv6"] = "0"
|
||||
}
|
||||
|
||||
func DockerAllowNetworkAdministration(config *docker.HostConfig) {
|
||||
config.CapAdd = append(config.CapAdd, "NET_ADMIN")
|
||||
config.Mounts = append(config.Mounts, docker.HostMount{
|
||||
Type: "bind",
|
||||
Source: "/dev/net/tun",
|
||||
Target: "/dev/net/tun",
|
||||
})
|
||||
}
|
||||
integration/dockertestutil/execute.go (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
)
|
||||
|
||||
const dockerExecuteTimeout = time.Second * 10
|
||||
|
||||
var (
|
||||
ErrDockertestCommandFailed = errors.New("dockertest command failed")
|
||||
ErrDockertestCommandTimeout = errors.New("dockertest command timed out")
|
||||
)
|
||||
|
||||
type ExecuteCommandConfig struct {
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
type ExecuteCommandOption func(*ExecuteCommandConfig) error
|
||||
|
||||
func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
|
||||
return ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {
|
||||
conf.timeout = timeout
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func ExecuteCommand(
|
||||
resource *dockertest.Resource,
|
||||
cmd []string,
|
||||
env []string,
|
||||
options ...ExecuteCommandOption,
|
||||
) (string, string, error) {
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
execConfig := ExecuteCommandConfig{
|
||||
timeout: dockerExecuteTimeout,
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
if err := opt(&execConfig); err != nil {
|
||||
return "", "", fmt.Errorf("execute-command/options: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
type result struct {
|
||||
exitCode int
|
||||
err error
|
||||
}
|
||||
|
||||
resultChan := make(chan result, 1)
|
||||
|
||||
// Run the long-running function in its own goroutine and pass its
|
||||
// response into our channel.
|
||||
go func() {
|
||||
exitCode, err := resource.Exec(
|
||||
cmd,
|
||||
dockertest.ExecOptions{
|
||||
Env: append(env, "HEADSCALE_LOG_LEVEL=disabled"),
|
||||
StdOut: &stdout,
|
||||
StdErr: &stderr,
|
||||
},
|
||||
)
|
||||
resultChan <- result{exitCode, err}
|
||||
}()
|
||||
|
||||
// Listen on our channel AND a timeout channel - whichever happens first.
|
||||
select {
|
||||
case res := <-resultChan:
|
||||
if res.err != nil {
|
||||
return stdout.String(), stderr.String(), res.err
|
||||
}
|
||||
|
||||
if res.exitCode != 0 {
|
||||
// Uncomment for debugging
|
||||
// log.Println("Command: ", cmd)
|
||||
// log.Println("stdout: ", stdout.String())
|
||||
// log.Println("stderr: ", stderr.String())
|
||||
|
||||
return stdout.String(), stderr.String(), ErrDockertestCommandFailed
|
||||
}
|
||||
|
||||
return stdout.String(), stderr.String(), nil
|
||||
case <-time.After(execConfig.timeout):
|
||||
|
||||
return stdout.String(), stderr.String(), ErrDockertestCommandTimeout
|
||||
}
|
||||
}
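A short usage sketch of the helper above; it assumes a running dockertest.Resource and raises the default 10 second limit through the ExecuteCommandTimeout option (the command itself is a placeholder):

// runSlowCommand is a hypothetical caller of ExecuteCommand: it allows the
// command 30 seconds instead of the default dockerExecuteTimeout and returns
// stdout, discarding stderr unless the command fails.
func runSlowCommand(resource *dockertest.Resource) (string, error) {
	stdout, stderr, err := ExecuteCommand(
		resource,
		[]string{"headscale", "nodes", "list"}, // placeholder command
		[]string{},                             // no extra environment variables
		ExecuteCommandTimeout(30*time.Second),
	)
	if err != nil {
		return stderr, err
	}

	return stdout, nil
}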
|
||||
integration/dockertestutil/network.go (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
var ErrContainerNotFound = errors.New("container not found")
|
||||
|
||||
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (*dockertest.Network, error) {
|
||||
networks, err := pool.NetworksByName(name)
|
||||
if err != nil || len(networks) == 0 {
|
||||
if _, err := pool.CreateNetwork(name); err == nil {
|
||||
// Create does not give us an updated version of the resource, so we need to
|
||||
// get it again.
|
||||
networks, err := pool.NetworksByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &networks[0], nil
|
||||
}
|
||||
}
|
||||
|
||||
return &networks[0], nil
|
||||
}
|
||||
|
||||
func AddContainerToNetwork(
|
||||
pool *dockertest.Pool,
|
||||
network *dockertest.Network,
|
||||
testContainer string,
|
||||
) error {
|
||||
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{
|
||||
All: true,
|
||||
Filters: map[string][]string{
|
||||
"name": {testContainer},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pool.Client.ConnectNetwork(network.Network.ID, docker.NetworkConnectionOptions{
|
||||
Container: containers[0].ID,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(kradalby): This doesn't work reliably, but calling the exact same functions
|
||||
// seems to work fine...
|
||||
// if container, ok := pool.ContainerByName("/" + testContainer); ok {
|
||||
// err := container.ConnectToNetwork(network)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
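A minimal sketch of how the two helpers above combine; it mirrors the scenario setup later in this change, and the network and container names are placeholders:

// attachTestSuite is a hypothetical helper: it reuses (or creates) the named
// network and then connects an already-running container to it, so the test
// suite container can reach the services under test.
func attachTestSuite(pool *dockertest.Pool, networkName, containerName string) (*dockertest.Network, error) {
	network, err := GetFirstOrCreateNetwork(pool, networkName)
	if err != nil {
		return nil, err
	}

	if err := AddContainerToNetwork(pool, network, containerName); err != nil {
		return nil, err
	}

	return network, nil
}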
|
||||
integration/general_test.go (new file, 350 lines)
@@ -0,0 +1,350 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func TestPingAllByIP(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingAllByHostname(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
// Omit 1.16.2 (-1) because it does not have the FQDN field
|
||||
"namespace3": len(TailscaleVersions) - 1,
|
||||
"namespace4": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, hostname := range allHostnames {
|
||||
err := client.Ping(hostname)
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", hostname, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestTaildrop(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
retry := func(times int, sleepInterval time.Duration, doWork func() error) error {
|
||||
var err error
|
||||
for attempts := 0; attempts < times; attempts++ {
|
||||
err = doWork()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(sleepInterval)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
// Omit 1.16.2 (-1) because it does not have the FQDN field
|
||||
"taildrop": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
// This will essentially fetch and cache all the FQDNs
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())}
|
||||
|
||||
if _, _, err := client.Execute(command); err != nil {
|
||||
t.Errorf("failed to create taildrop file on %s, err: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
// It is safe to ignore this error as we handled it when caching it
|
||||
peerFQDN, _ := peer.FQDN()
|
||||
|
||||
t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
|
||||
command := []string{
|
||||
"tailscale", "file", "cp",
|
||||
fmt.Sprintf("/tmp/file_from_%s", client.Hostname()),
|
||||
fmt.Sprintf("%s:", peerFQDN),
|
||||
}
|
||||
|
||||
err := retry(10, 1*time.Second, func() error {
|
||||
t.Logf(
|
||||
"Sending file from %s to %s\n",
|
||||
client.Hostname(),
|
||||
peer.Hostname(),
|
||||
)
|
||||
_, _, err := client.Execute(command)
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to send taildrop file on %s, err: %s",
|
||||
client.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
command := []string{
|
||||
"tailscale", "file",
|
||||
"get",
|
||||
"/tmp/",
|
||||
}
|
||||
if _, _, err := client.Execute(command); err != nil {
|
||||
t.Errorf("failed to get taildrop file on %s, err: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
|
||||
command := []string{
|
||||
"ls",
|
||||
fmt.Sprintf("/tmp/file_from_%s", peer.Hostname()),
|
||||
}
|
||||
log.Printf(
|
||||
"Checking file in %s from %s\n",
|
||||
client.Hostname(),
|
||||
peer.Hostname(),
|
||||
)
|
||||
|
||||
result, _, err := client.Execute(command)
|
||||
if err != nil {
|
||||
t.Errorf("failed to execute command to ls taildrop: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("Result for %s: %s\n", peer.Hostname(), result)
|
||||
if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result {
|
||||
t.Errorf(
|
||||
"taildrop result is not correct %s, wanted %s",
|
||||
result,
|
||||
fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMagicDNS(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
// Omit 1.16.2 (-1) because it does not have the FQDN field
|
||||
"magicdns1": len(TailscaleVersions) - 1,
|
||||
"magicdns2": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
// Poor man's cache
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get IPs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
// It is safe to ignore this error as we handled it when caching it
|
||||
peerFQDN, _ := peer.FQDN()
|
||||
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"ip", peerFQDN,
|
||||
}
|
||||
result, _, err := client.Execute(command)
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to execute resolve/ip command %s from %s: %s",
|
||||
peerFQDN,
|
||||
client.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
ips, err := peer.IPs()
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to get ips for %s: %s",
|
||||
peer.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
if !strings.Contains(result, ip.String()) {
|
||||
t.Errorf("ip %s is not found in \n%s\n", ip.String(), result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
integration/hsic/config.go (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
package hsic
|
||||
|
||||
// const (
|
||||
// defaultEphemeralNodeInactivityTimeout = time.Second * 30
|
||||
// defaultNodeUpdateCheckInterval = time.Second * 10
|
||||
// )
|
||||
|
||||
// TODO(kradalby): This approach doesn't work because we cannot
|
||||
// serialise our config object to YAML or JSON.
|
||||
// func DefaultConfig() headscale.Config {
|
||||
// derpMap, _ := url.Parse("https://controlplane.tailscale.com/derpmap/default")
|
||||
//
|
||||
// config := headscale.Config{
|
||||
// Log: headscale.LogConfig{
|
||||
// Level: zerolog.TraceLevel,
|
||||
// },
|
||||
// ACL: headscale.GetACLConfig(),
|
||||
// DBtype: "sqlite3",
|
||||
// EphemeralNodeInactivityTimeout: defaultEphemeralNodeInactivityTimeout,
|
||||
// NodeUpdateCheckInterval: defaultNodeUpdateCheckInterval,
|
||||
// IPPrefixes: []netip.Prefix{
|
||||
// netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
|
||||
// netip.MustParsePrefix("100.64.0.0/10"),
|
||||
// },
|
||||
// DNSConfig: &tailcfg.DNSConfig{
|
||||
// Proxied: true,
|
||||
// Nameservers: []netip.Addr{
|
||||
// netip.MustParseAddr("127.0.0.11"),
|
||||
// netip.MustParseAddr("1.1.1.1"),
|
||||
// },
|
||||
// Resolvers: []*dnstype.Resolver{
|
||||
// {
|
||||
// Addr: "127.0.0.11",
|
||||
// },
|
||||
// {
|
||||
// Addr: "1.1.1.1",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// BaseDomain: "headscale.net",
|
||||
//
|
||||
// DBpath: "/tmp/integration_test_db.sqlite3",
|
||||
//
|
||||
// PrivateKeyPath: "/tmp/integration_private.key",
|
||||
// NoisePrivateKeyPath: "/tmp/noise_integration_private.key",
|
||||
// Addr: "0.0.0.0:8080",
|
||||
// MetricsAddr: "127.0.0.1:9090",
|
||||
// ServerURL: "http://headscale:8080",
|
||||
//
|
||||
// DERP: headscale.DERPConfig{
|
||||
// URLs: []url.URL{
|
||||
// *derpMap,
|
||||
// },
|
||||
// AutoUpdate: false,
|
||||
// UpdateFrequency: 1 * time.Minute,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// return config
|
||||
// }
|
||||
|
||||
// TODO: Reuse the actual configuration object above.
|
||||
func DefaultConfigYAML() string {
|
||||
yaml := `
|
||||
log:
|
||||
level: trace
|
||||
acl_policy_path: ""
|
||||
db_type: sqlite3
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
node_update_check_interval: 10s
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
dns_config:
|
||||
base_domain: headscale.net
|
||||
magic_dns: true
|
||||
domains: []
|
||||
nameservers:
|
||||
- 127.0.0.11
|
||||
- 1.1.1.1
|
||||
private_key_path: /tmp/private.key
|
||||
noise:
|
||||
private_key_path: /tmp/noise_private.key
|
||||
listen_addr: 0.0.0.0:8080
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
server_url: http://headscale:8080
|
||||
|
||||
derp:
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
auto_update_enabled: false
|
||||
update_frequency: 1m
|
||||
`
|
||||
|
||||
return yaml
|
||||
}
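The YAML above only provides defaults; individual settings are overridden per test through environment variables (see WithConfigEnv in hsic.go below). A minimal sketch, where the variable name follows the HEADSCALE_ prefix already used elsewhere in this change and the test name is a placeholder:

// exampleOptions is a hypothetical helper returning options for hsic.New:
// the config.yaml written into the container keeps the defaults from
// DefaultConfigYAML, while single values are overridden via the environment.
func exampleOptions() []Option {
	return []Option{
		WithTestName("configexample"),
		WithConfigEnv(map[string]string{
			"HEADSCALE_LOG_LEVEL": "info",
		}),
	}
}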
|
||||
integration/hsic/hsic.go (new file, 458 lines)
@@ -0,0 +1,458 @@
|
||||
package hsic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
hsicHashLength = 6
|
||||
dockerContextPath = "../."
|
||||
aclPolicyPath = "/etc/headscale/acl.hujson"
|
||||
tlsCertPath = "/etc/headscale/tls.cert"
|
||||
tlsKeyPath = "/etc/headscale/tls.key"
|
||||
headscaleDefaultPort = 8080
|
||||
)
|
||||
|
||||
var errHeadscaleStatusCodeNotOk = errors.New("headscale status code not ok")
|
||||
|
||||
type HeadscaleInContainer struct {
|
||||
hostname string
|
||||
|
||||
pool *dockertest.Pool
|
||||
container *dockertest.Resource
|
||||
network *dockertest.Network
|
||||
|
||||
// optional config
|
||||
port int
|
||||
aclPolicy *headscale.ACLPolicy
|
||||
env []string
|
||||
tlsCert []byte
|
||||
tlsKey []byte
|
||||
}
|
||||
|
||||
type Option = func(c *HeadscaleInContainer)
|
||||
|
||||
func WithACLPolicy(acl *headscale.ACLPolicy) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
// TODO(kradalby): Move somewhere appropriate
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
|
||||
|
||||
hsic.aclPolicy = acl
|
||||
}
|
||||
}
|
||||
|
||||
func WithTLS() Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
cert, key, err := createCertificate()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create certificates for headscale test: %s", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby): Move somewhere appropriate
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_CERT_PATH=%s", tlsCertPath))
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_KEY_PATH=%s", tlsKeyPath))
|
||||
|
||||
hsic.tlsCert = cert
|
||||
hsic.tlsKey = key
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigEnv(configEnv map[string]string) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
for key, value := range configEnv {
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func WithPort(port int) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hsic.port = port
|
||||
}
|
||||
}
|
||||
|
||||
func WithTestName(testName string) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicHashLength)
|
||||
|
||||
hostname := fmt.Sprintf("hs-%s-%s", testName, hash)
|
||||
hsic.hostname = hostname
|
||||
}
|
||||
}
|
||||
|
||||
func WithHostnameAsServerURL() Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hsic.env = append(
|
||||
hsic.env,
|
||||
fmt.Sprintf("HEADSCALE_SERVER_URL=http://%s:%d",
|
||||
hsic.GetHostname(),
|
||||
hsic.port,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
func New(
|
||||
pool *dockertest.Pool,
|
||||
network *dockertest.Network,
|
||||
opts ...Option,
|
||||
) (*HeadscaleInContainer, error) {
|
||||
hash, err := headscale.GenerateRandomStringDNSSafe(hsicHashLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostname := fmt.Sprintf("hs-%s", hash)
|
||||
|
||||
hsic := &HeadscaleInContainer{
|
||||
hostname: hostname,
|
||||
port: headscaleDefaultPort,
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(hsic)
|
||||
}
|
||||
|
||||
log.Println("NAME: ", hsic.hostname)
|
||||
|
||||
portProto := fmt.Sprintf("%d/tcp", hsic.port)
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
ContextDir: dockerContextPath,
|
||||
}
|
||||
|
||||
runOptions := &dockertest.RunOptions{
|
||||
Name: hsic.hostname,
|
||||
ExposedPorts: []string{portProto},
|
||||
Networks: []*dockertest.Network{network},
|
||||
// Cmd: []string{"headscale", "serve"},
|
||||
// TODO(kradalby): Get rid of this hack, we currently need to give ourselves some time
|
||||
// to inject the headscale configuration further down.
|
||||
Entrypoint: []string{"/bin/bash", "-c", "/bin/sleep 3 ; headscale serve"},
|
||||
Env: hsic.env,
|
||||
}
|
||||
|
||||
// dockertest isn't very good at handling containers that have already
|
||||
// been created; this is an attempt to make sure this container isn't
|
||||
// present.
|
||||
err = pool.RemoveContainerByName(hsic.hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
container, err := pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
runOptions,
|
||||
dockertestutil.DockerRestartPolicy,
|
||||
dockertestutil.DockerAllowLocalIPv6,
|
||||
dockertestutil.DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not start headscale container: %w", err)
|
||||
}
|
||||
log.Printf("Created %s container\n", hsic.hostname)
|
||||
|
||||
hsic.container = container
|
||||
|
||||
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(DefaultConfigYAML()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write headscale config to container: %w", err)
|
||||
}
|
||||
|
||||
if hsic.aclPolicy != nil {
|
||||
data, err := json.Marshal(hsic.aclPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal ACL Policy to JSON: %w", err)
|
||||
}
|
||||
|
||||
err = hsic.WriteFile(aclPolicyPath, data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write ACL policy to container: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if hsic.hasTLS() {
|
||||
err = hsic.WriteFile(tlsCertPath, hsic.tlsCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
|
||||
}
|
||||
|
||||
err = hsic.WriteFile(tlsKeyPath, hsic.tlsKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write TLS key to container: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return hsic, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) hasTLS() bool {
|
||||
return len(t.tlsCert) != 0 && len(t.tlsKey) != 0
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) Shutdown() error {
|
||||
return t.pool.Purge(t.container)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) Execute(
|
||||
command []string,
|
||||
) (string, error) {
|
||||
stdout, stderr, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
log.Printf("command stderr: %s\n", stderr)
|
||||
|
||||
if stdout != "" {
|
||||
log.Printf("command stdout: %s\n", stdout)
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetIP() string {
|
||||
return t.container.GetIPInNetwork(t.network)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetPort() string {
|
||||
return fmt.Sprintf("%d", t.port)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetHealthEndpoint() string {
|
||||
return fmt.Sprintf("%s/health", t.GetEndpoint())
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetEndpoint() string {
|
||||
hostEndpoint := fmt.Sprintf("%s:%d",
|
||||
t.GetIP(),
|
||||
t.port)
|
||||
|
||||
if t.hasTLS() {
|
||||
return fmt.Sprintf("https://%s", hostEndpoint)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("http://%s", hostEndpoint)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetCert() []byte {
|
||||
return t.tlsCert
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetHostname() string {
|
||||
return t.hostname
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) WaitForReady() error {
|
||||
url := t.GetHealthEndpoint()
|
||||
|
||||
log.Printf("waiting for headscale to be ready at %s", url)
|
||||
|
||||
client := &http.Client{}
|
||||
|
||||
if t.hasTLS() {
|
||||
insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint
|
||||
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint
|
||||
client = &http.Client{Transport: insecureTransport}
|
||||
}
|
||||
|
||||
return t.pool.Retry(func() error {
|
||||
resp, err := client.Get(url) //nolint
|
||||
if err != nil {
|
||||
return fmt.Errorf("headscale is not ready: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return errHeadscaleStatusCodeNotOk
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) CreateNamespace(
|
||||
namespace string,
|
||||
) error {
|
||||
command := []string{"headscale", "namespaces", "create", namespace}
|
||||
|
||||
_, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) CreateAuthKey(
|
||||
namespace string,
|
||||
) (*v1.PreAuthKey, error) {
|
||||
command := []string{
|
||||
"headscale",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"preauthkeys",
|
||||
"create",
|
||||
"--reusable",
|
||||
"--expiration",
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
}
|
||||
|
||||
result, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute create auth key command: %w", err)
|
||||
}
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = json.Unmarshal([]byte(result), &preAuthKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)
|
||||
}
|
||||
|
||||
return &preAuthKey, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) ListMachinesInNamespace(
|
||||
namespace string,
|
||||
) ([]*v1.Machine, error) {
|
||||
command := []string{"headscale", "--namespace", namespace, "nodes", "list", "--output", "json"}
|
||||
|
||||
result, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute list node command: %w", err)
|
||||
}
|
||||
|
||||
var nodes []*v1.Machine
|
||||
err = json.Unmarshal([]byte(result), &nodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal nodes: %w", err)
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) WriteFile(path string, data []byte) error {
|
||||
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
|
||||
}
|
||||
|
||||
// nolint
|
||||
func createCertificate() ([]byte, []byte, error) {
|
||||
// From:
|
||||
// https://shaneutt.com/blog/golang-ca-and-signed-cert-go/
|
||||
|
||||
ca := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(2019),
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Headscale testing INC"},
|
||||
Country: []string{"NL"},
|
||||
Locality: []string{"Leiden"},
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(30 * time.Minute),
|
||||
IsCA: true,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageClientAuth,
|
||||
x509.ExtKeyUsageServerAuth,
|
||||
},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1658),
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Headscale testing INC"},
|
||||
Country: []string{"NL"},
|
||||
Locality: []string{"Leiden"},
|
||||
},
|
||||
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(30 * time.Minute),
|
||||
SubjectKeyId: []byte{1, 2, 3, 4, 6},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
}
|
||||
|
||||
certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
certBytes, err := x509.CreateCertificate(
|
||||
rand.Reader,
|
||||
cert,
|
||||
ca,
|
||||
&certPrivKey.PublicKey,
|
||||
caPrivKey,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
certPEM := new(bytes.Buffer)
|
||||
|
||||
err = pem.Encode(certPEM, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certBytes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
certPrivKeyPEM := new(bytes.Buffer)
|
||||
|
||||
err = pem.Encode(certPrivKeyPEM, &pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil
|
||||
}
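A minimal sketch of wiring this file together from a test: it builds a TLS-enabled headscale container and waits for the health endpoint, assuming a pool and network from the scenario code below (the test name is a placeholder):

// newTLSHeadscale is a hypothetical helper: WithTLS generates a self-signed
// certificate via createCertificate, New builds and starts the container, and
// WaitForReady polls the /health endpoint over HTTPS.
func newTLSHeadscale(pool *dockertest.Pool, network *dockertest.Network) (*HeadscaleInContainer, error) {
	hs, err := New(
		pool,
		network,
		WithTestName("tlsexample"),
		WithTLS(),
	)
	if err != nil {
		return nil, err
	}

	if err := hs.WaitForReady(); err != nil {
		return nil, err
	}

	return hs, nil
}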
|
||||
integration/integrationutil/util.go (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
package integrationutil
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
func WriteFileToContainer(
|
||||
pool *dockertest.Pool,
|
||||
container *dockertest.Resource,
|
||||
path string,
|
||||
data []byte,
|
||||
) error {
|
||||
dirPath, fileName := filepath.Split(path)
|
||||
|
||||
file := bytes.NewReader(data)
|
||||
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
|
||||
tarWriter := tar.NewWriter(buf)
|
||||
|
||||
header := &tar.Header{
|
||||
Name: fileName,
|
||||
Size: file.Size(),
|
||||
// Mode: int64(stat.Mode()),
|
||||
// ModTime: stat.ModTime(),
|
||||
}
|
||||
|
||||
err := tarWriter.WriteHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed write file header to tar: %w", err)
|
||||
}
|
||||
|
||||
_, err = io.Copy(tarWriter, file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy file to tar: %w", err)
|
||||
}
|
||||
|
||||
err = tarWriter.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close tar: %w", err)
|
||||
}
|
||||
|
||||
// Ensure the directory is present inside the container
|
||||
_, _, err = dockertestutil.ExecuteCommand(
|
||||
container,
|
||||
[]string{"mkdir", "-p", dirPath},
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure directory: %w", err)
|
||||
}
|
||||
|
||||
err = pool.Client.UploadToContainer(
|
||||
container.Container.ID,
|
||||
docker.UploadToContainerOptions{
|
||||
NoOverwriteDirNonDir: false,
|
||||
Path: dirPath,
|
||||
InputStream: bytes.NewReader(buf.Bytes()),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
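A short usage sketch; it mirrors the WriteFile wrapper in hsic.go, and the path and payload are placeholders:

// writeMarkerFile is a hypothetical caller: it pushes a small file into a
// running container by tar-ing it in memory and uploading it, exactly as the
// function above does for the headscale config and TLS material.
func writeMarkerFile(pool *dockertest.Pool, container *dockertest.Resource) error {
	return WriteFileToContainer(pool, container, "/etc/motd", []byte("integration test container\n"))
}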
|
||||
integration/scenario.go (new file, 471 lines)
@@ -0,0 +1,471 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/puzpuzpuz/xsync/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
scenarioHashLength = 6
|
||||
maxWait = 60 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
errNoHeadscaleAvailable = errors.New("no headscale available")
|
||||
errNoNamespaceAvailable = errors.New("no namespace available")
|
||||
|
||||
// Tailscale started adding TS2021 support in CapabilityVersion>=28 (v1.24.0), but
|
||||
// proper support in Headscale was only added for CapabilityVersion>=39 clients (v1.30.0).
|
||||
tailscaleVersions2021 = []string{
|
||||
"head",
|
||||
"unstable",
|
||||
"1.34.0",
|
||||
"1.32.3",
|
||||
"1.30.2",
|
||||
}
|
||||
|
||||
tailscaleVersions2019 = []string{
|
||||
"1.28.0",
|
||||
"1.26.2",
|
||||
"1.24.2",
|
||||
"1.22.2",
|
||||
"1.20.4",
|
||||
"1.18.2",
|
||||
}
|
||||
|
||||
// tailscaleVersionsUnavailable = []string{
|
||||
// // These versions seem to fail when fetching from apt.
|
||||
// "1.16.2",
|
||||
// "1.14.6",
|
||||
// "1.12.4",
|
||||
// "1.10.2",
|
||||
// "1.8.7",
|
||||
// }.
|
||||
|
||||
TailscaleVersions = append(
|
||||
tailscaleVersions2021,
|
||||
tailscaleVersions2019...,
|
||||
)
|
||||
)
|
||||
|
||||
type Namespace struct {
|
||||
Clients map[string]TailscaleClient
|
||||
|
||||
createWaitGroup sync.WaitGroup
|
||||
joinWaitGroup sync.WaitGroup
|
||||
syncWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
// TODO(kradalby): make control server configurable, test correctness with Tailscale SaaS.
|
||||
type Scenario struct {
|
||||
// TODO(kradalby): support multiple headscales later, currently we only
|
||||
// use one.
|
||||
controlServers *xsync.MapOf[string, ControlServer]
|
||||
|
||||
namespaces map[string]*Namespace
|
||||
|
||||
pool *dockertest.Pool
|
||||
network *dockertest.Network
|
||||
|
||||
headscaleLock sync.Mutex
|
||||
}
|
||||
|
||||
func NewScenario() (*Scenario, error) {
|
||||
hash, err := headscale.GenerateRandomStringDNSSafe(scenarioHashLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pool, err := dockertest.NewPool("")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not connect to docker: %w", err)
|
||||
}
|
||||
|
||||
pool.MaxWait = maxWait
|
||||
|
||||
networkName := fmt.Sprintf("hs-%s", hash)
|
||||
if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" {
|
||||
networkName = overrideNetworkName
|
||||
}
|
||||
|
||||
network, err := dockertestutil.GetFirstOrCreateNetwork(pool, networkName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create or get network: %w", err)
|
||||
}
|
||||
|
||||
// We run the test suite in a docker container that calls a couple of endpoints for
|
||||
// readiness checks; this ensures that we can run the tests with individual networks
|
||||
// and have the client reach the different containers
|
||||
err = dockertestutil.AddContainerToNetwork(pool, network, "headscale-test-suite")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
|
||||
}
|
||||
|
||||
return &Scenario{
|
||||
controlServers: xsync.NewMapOf[ControlServer](),
|
||||
namespaces: make(map[string]*Namespace),
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Scenario) Shutdown() error {
|
||||
s.controlServers.Range(func(_ string, control ControlServer) bool {
|
||||
err := control.Shutdown()
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"Failed to shut down control: %s",
|
||||
fmt.Errorf("failed to tear down control: %w", err),
|
||||
)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
for namespaceName, namespace := range s.namespaces {
|
||||
for _, client := range namespace.Clients {
|
||||
log.Printf("removing client %s in namespace %s", client.Hostname(), namespaceName)
|
||||
err := client.Shutdown()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to tear down client: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.RemoveNetwork(s.network); err != nil {
|
||||
return fmt.Errorf("failed to remove network: %w", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby): This seems redundant to the previous call
|
||||
// if err := s.network.Close(); err != nil {
|
||||
// return fmt.Errorf("failed to tear down network: %w", err)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scenario) Namespaces() []string {
|
||||
namespaces := make([]string, 0)
|
||||
for namespace := range s.namespaces {
|
||||
namespaces = append(namespaces, namespace)
|
||||
}
|
||||
|
||||
return namespaces
|
||||
}
|
||||
|
||||
/// Headscale related stuff
|
||||
// Note: These functions assume that there is a _single_ headscale instance for now
|
||||
|
||||
// TODO(kradalby): make port and headscale configurable, multiple instances support?
|
||||
func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
|
||||
s.headscaleLock.Lock()
|
||||
defer s.headscaleLock.Unlock()
|
||||
|
||||
if headscale, ok := s.controlServers.Load("headscale"); ok {
|
||||
return headscale, nil
|
||||
}
|
||||
|
||||
headscale, err := hsic.New(s.pool, s.network, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create headscale container: %w", err)
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed reach headscale container: %w", err)
|
||||
}
|
||||
|
||||
s.controlServers.Store("headscale", headscale)
|
||||
|
||||
return headscale, nil
|
||||
}
|
||||
|
||||
func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
|
||||
if headscale, err := s.Headscale(); err == nil {
|
||||
key, err := headscale.CreateAuthKey(namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create namespace: %w", err)
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to create namespace: %w", errNoHeadscaleAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) CreateNamespace(namespace string) error {
|
||||
if headscale, err := s.Headscale(); err == nil {
|
||||
err := headscale.CreateNamespace(namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create namespace: %w", err)
|
||||
}
|
||||
|
||||
s.namespaces[namespace] = &Namespace{
|
||||
Clients: make(map[string]TailscaleClient),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to create namespace: %w", errNoHeadscaleAvailable)
|
||||
}
|
||||
|
||||
/// Client related stuff
|
||||
|
||||
func (s *Scenario) CreateTailscaleNodesInNamespace(
|
||||
namespaceStr string,
|
||||
requestedVersion string,
|
||||
count int,
|
||||
opts ...tsic.Option,
|
||||
) error {
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for i := 0; i < count; i++ {
|
||||
version := requestedVersion
|
||||
if requestedVersion == "all" {
|
||||
version = TailscaleVersions[i%len(TailscaleVersions)]
|
||||
}
|
||||
|
||||
headscale, err := s.Headscale()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tailscale node: %w", err)
|
||||
}
|
||||
|
||||
cert := headscale.GetCert()
|
||||
hostname := headscale.GetHostname()
|
||||
|
||||
namespace.createWaitGroup.Add(1)
|
||||
|
||||
opts = append(opts,
|
||||
tsic.WithHeadscaleTLS(cert),
|
||||
tsic.WithHeadscaleName(hostname),
|
||||
)
|
||||
|
||||
go func() {
|
||||
defer namespace.createWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
tsClient, err := tsic.New(
|
||||
s.pool,
|
||||
version,
|
||||
s.network,
|
||||
opts...,
|
||||
)
|
||||
if err != nil {
|
||||
// return fmt.Errorf("failed to add tailscale node: %w", err)
|
||||
log.Printf("failed to create tailscale node: %s", err)
|
||||
}
|
||||
|
||||
err = tsClient.WaitForReady()
|
||||
if err != nil {
|
||||
// return fmt.Errorf("failed to add tailscale node: %w", err)
|
||||
log.Printf("failed to wait for tailscaled: %s", err)
|
||||
}
|
||||
|
||||
namespace.Clients[tsClient.Hostname()] = tsClient
|
||||
}()
|
||||
}
|
||||
namespace.createWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to add tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) RunTailscaleUp(
|
||||
namespaceStr, loginServer, authKey string,
|
||||
) error {
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.joinWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.joinWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
_ = c.Up(loginServer, authKey)
|
||||
}(client)
|
||||
|
||||
err := client.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) CountTailscale() int {
|
||||
count := 0
|
||||
|
||||
for _, namespace := range s.namespaces {
|
||||
count += len(namespace.Clients)
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (s *Scenario) WaitForTailscaleSync() error {
|
||||
tsCount := s.CountTailscale()
|
||||
|
||||
for _, namespace := range s.namespaces {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.syncWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.syncWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
_ = c.WaitForPeers(tsCount)
|
||||
}(client)
|
||||
}
|
||||
namespace.syncWaitGroup.Wait()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateHeadscaleEnv is a convenient method returning a set-up Headscale
|
||||
// test environment with nodes of all versions, joined to the server with X
|
||||
// namespaces.
|
||||
func (s *Scenario) CreateHeadscaleEnv(
|
||||
namespaces map[string]int,
|
||||
tsOpts []tsic.Option,
|
||||
opts ...hsic.Option,
|
||||
) error {
|
||||
headscale, err := s.Headscale(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for namespaceName, clientCount := range namespaces {
|
||||
err = s.CreateNamespace(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount, tsOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key, err := s.CreatePreAuthKey(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scenario) GetIPs(namespace string) ([]netip.Addr, error) {
|
||||
var ips []netip.Addr
|
||||
if ns, ok := s.namespaces[namespace]; ok {
|
||||
for _, client := range ns.Clients {
|
||||
clientIps, err := client.IPs()
|
||||
if err != nil {
|
||||
return ips, fmt.Errorf("failed to get ips: %w", err)
|
||||
}
|
||||
ips = append(ips, clientIps...)
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
return ips, fmt.Errorf("failed to get ips: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) GetClients(namespace string) ([]TailscaleClient, error) {
|
||||
var clients []TailscaleClient
|
||||
if ns, ok := s.namespaces[namespace]; ok {
|
||||
for _, client := range ns.Clients {
|
||||
clients = append(clients, client)
|
||||
}
|
||||
|
||||
return clients, nil
|
||||
}
|
||||
|
||||
return clients, fmt.Errorf("failed to get clients: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *Scenario) ListTailscaleClients(namespaces ...string) ([]TailscaleClient, error) {
|
||||
var allClients []TailscaleClient
|
||||
|
||||
if len(namespaces) == 0 {
|
||||
namespaces = s.Namespaces()
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
clients, err := s.GetClients(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allClients = append(allClients, clients...)
|
||||
}
|
||||
|
||||
return allClients, nil
|
||||
}
|
||||
|
||||
func (s *Scenario) ListTailscaleClientsIPs(namespaces ...string) ([]netip.Addr, error) {
|
||||
var allIps []netip.Addr
|
||||
|
||||
if len(namespaces) == 0 {
|
||||
namespaces = s.Namespaces()
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
ips, err := s.GetIPs(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allIps = append(allIps, ips...)
|
||||
}
|
||||
|
||||
return allIps, nil
|
||||
}
|
||||
|
||||
func (s *Scenario) ListTailscaleClientsFQDNs(namespaces ...string) ([]string, error) {
|
||||
allFQDNs := make([]string, 0)
|
||||
|
||||
clients, err := s.ListTailscaleClients(namespaces...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, client := range clients {
|
||||
fqdn, err := client.FQDN()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allFQDNs = append(allFQDNs, fqdn)
|
||||
}
|
||||
|
||||
return allFQDNs, nil
|
||||
}
|
||||
integration/scenario_test.go (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
)
|
||||
|
||||
// This file is intended to "test the test framework"; by proxy it will also test
|
||||
// some Headscale/Tailscale stuff, but mostly in very simple ways.
|
||||
|
||||
func IntegrationSkip(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
if !dockertestutil.IsRunningInContainer() {
|
||||
t.Skip("not running in docker, skipping")
|
||||
}
|
||||
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration tests due to short flag")
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestHeadscale(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
|
||||
namespace := "test-space"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
t.Run("start-headscale", func(t *testing.T) {
|
||||
headscale, err := scenario.Headscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create start headcale: %s", err)
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
t.Errorf("headscale failed to become ready: %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("create-namespace", func(t *testing.T) {
|
||||
err := scenario.CreateNamespace(namespace)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create namespace: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := scenario.namespaces[namespace]; !ok {
|
||||
t.Errorf("namespace is not in scenario")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("create-auth-key", func(t *testing.T) {
|
||||
_, err := scenario.CreatePreAuthKey(namespace)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create preauthkey: %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestCreateTailscale(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "only-create-containers"
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
scenario.namespaces[namespace] = &Namespace{
|
||||
Clients: make(map[string]TailscaleClient),
|
||||
}
|
||||
|
||||
t.Run("create-tailscale", func(t *testing.T) {
|
||||
err := scenario.CreateTailscaleNodesInNamespace(namespace, "all", 3)
|
||||
if err != nil {
|
||||
t.Errorf("failed to add tailscale nodes: %s", err)
|
||||
}
|
||||
|
||||
if clients := len(scenario.namespaces[namespace].Clients); clients != 3 {
|
||||
t.Errorf("wrong number of tailscale clients: %d != %d", clients, 3)
|
||||
}
|
||||
|
||||
// TODO(kradalby): Test "all" version logic
|
||||
})
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
|
||||
namespace := "join-node-test"
|
||||
|
||||
count := 1
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
t.Run("start-headscale", func(t *testing.T) {
|
||||
headscale, err := scenario.Headscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create start headcale: %s", err)
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
t.Errorf("headscale failed to become ready: %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("create-namespace", func(t *testing.T) {
|
||||
err := scenario.CreateNamespace(namespace)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create namespace: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := scenario.namespaces[namespace]; !ok {
|
||||
t.Errorf("namespace is not in scenario")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("create-tailscale", func(t *testing.T) {
|
||||
err := scenario.CreateTailscaleNodesInNamespace(namespace, "1.30.2", count)
|
||||
if err != nil {
|
||||
t.Errorf("failed to add tailscale nodes: %s", err)
|
||||
}
|
||||
|
||||
if clients := len(scenario.namespaces[namespace].Clients); clients != count {
|
||||
t.Errorf("wrong number of tailscale clients: %d != %d", clients, count)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("join-headscale", func(t *testing.T) {
|
||||
key, err := scenario.CreatePreAuthKey(namespace)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create preauthkey: %s", err)
|
||||
}
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create start headcale: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.RunTailscaleUp(
|
||||
namespace,
|
||||
headscale.GetEndpoint(),
|
||||
key.GetKey(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to login: %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("get-ips", func(t *testing.T) {
|
||||
ips, err := scenario.GetIPs(namespace)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get tailscale ips: %s", err)
|
||||
}
|
||||
|
||||
if len(ips) != count*2 {
|
||||
t.Errorf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2)
|
||||
}
|
||||
})
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
519  integration/ssh_test.go  Normal file
@@ -0,0 +1,519 @@
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
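// retry runs doWork up to the given number of times, sleeping sleepInterval
// between attempts and accumulating stdout/stderr across attempts. It returns
// early on success, or immediately on "Permission denied (tailscale)", since
// that error will not go away by retrying.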
var retry = func(times int, sleepInterval time.Duration,
|
||||
doWork func() (string, string, error),
|
||||
) (string, string, error) {
|
||||
var result string
|
||||
var stderr string
|
||||
var err error
|
||||
|
||||
for attempts := 0; attempts < times; attempts++ {
|
||||
// assign to the outer err so the caller sees the last failure after all retries
var tempResult, tempStderr string
tempResult, tempStderr, err = doWork()
|
||||
|
||||
result += tempResult
|
||||
stderr += tempStderr
|
||||
|
||||
if err == nil {
|
||||
return result, stderr, nil
|
||||
}
|
||||
|
||||
// If we get a permission denied error, we can fail immediately
|
||||
// since that is something we won't recover from by retrying.
|
||||
if err != nil && strings.Contains(stderr, "Permission denied (tailscale)") {
|
||||
return result, stderr, err
|
||||
}
|
||||
|
||||
time.Sleep(sleepInterval)
|
||||
}
|
||||
|
||||
return result, stderr, err
|
||||
}
|
||||
|
||||
func TestSSHOneNamespaceAllToAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:integration-test"},
|
||||
Destinations: []string{"group:integration-test"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHMultipleNamespacesAllToAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
"namespace2": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1", "namespace2"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:integration-test"},
|
||||
Destinations: []string{"group:integration-test"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
nsOneClients, err := scenario.ListTailscaleClients("namespace1")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
nsTwoClients, err := scenario.ListTailscaleClients("namespace2")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
testInterNamespaceSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) {
|
||||
for _, client := range sourceClients {
|
||||
for _, peer := range targetClients {
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testInterNamespaceSSH(nsOneClients, nsTwoClients)
|
||||
testInterNamespaceSSH(nsTwoClients, nsOneClients)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHNoSSHConfigured(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{},
|
||||
},
|
||||
),
|
||||
hsic.WithTestName("sshnoneconfigured"),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHPermissionDenied(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHIsBlockedInACL(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:80"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:integration-test"},
|
||||
Destinations: []string{"group:integration-test"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithTestName("sshisblockedinacl"),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHTimeout(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHNamespaceOnlyIsolation(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespaceacl1": len(TailscaleVersions) - 5,
|
||||
"namespaceacl2": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:ssh1": {"namespaceacl1"},
|
||||
"group:ssh2": {"namespaceacl2"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:ssh1"},
|
||||
Destinations: []string{"group:ssh1"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:ssh2"},
|
||||
Destinations: []string{"group:ssh2"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithTestName("sshtwonamespaceaclblock"),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
ssh1Clients, err := scenario.ListTailscaleClients("namespaceacl1")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
ssh2Clients, err := scenario.ListTailscaleClients("namespaceacl2")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby,evenh): ACLs currently do not cover reject
// cases properly, and will accept all incoming connections
// as long as a rule is present.
|
||||
//
|
||||
// for _, client := range ssh1Clients {
|
||||
// for _, peer := range ssh2Clients {
|
||||
// if client.Hostname() == peer.Hostname() {
|
||||
// continue
|
||||
// }
|
||||
//
|
||||
// assertSSHPermissionDenied(t, client, peer)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// for _, client := range ssh2Clients {
|
||||
// for _, peer := range ssh1Clients {
|
||||
// if client.Hostname() == peer.Hostname() {
|
||||
// continue
|
||||
// }
|
||||
//
|
||||
// assertSSHPermissionDenied(t, client, peer)
|
||||
// }
|
||||
// }
|
||||
|
||||
for _, client := range ssh1Clients {
|
||||
for _, peer := range ssh1Clients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
for _, client := range ssh2Clients {
|
||||
for _, peer := range ssh2Clients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
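// doSSH runs `ssh ssh-it-user@<peer FQDN> 'hostname'` from the client node,
// retrying up to 10 times with a one-second pause between attempts, and
// returns the accumulated stdout, stderr and final error.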
func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
|
||||
t.Helper()
|
||||
|
||||
peerFQDN, _ := peer.FQDN()
|
||||
|
||||
command := []string{
|
||||
"ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
|
||||
fmt.Sprintf("%s@%s", "ssh-it-user", peerFQDN),
|
||||
"'hostname'",
|
||||
}
|
||||
|
||||
return retry(10, 1*time.Second, func() (string, string, error) {
|
||||
return client.Execute(command)
|
||||
})
|
||||
}
|
||||
|
||||
func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClient) {
|
||||
t.Helper()
|
||||
|
||||
result, _, err := doSSH(t, client, peer)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
|
||||
}
|
||||
|
||||
func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
|
||||
t.Helper()
|
||||
|
||||
result, stderr, err := doSSH(t, client, peer)
|
||||
assert.Error(t, err)
|
||||
|
||||
assert.Empty(t, result)
|
||||
|
||||
assert.Contains(t, stderr, "Permission denied (tailscale)")
|
||||
}
|
||||
|
||||
func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
|
||||
t.Helper()
|
||||
|
||||
result, stderr, err := doSSH(t, client, peer)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Empty(t, result)
|
||||
|
||||
assert.Contains(t, stderr, "Connection timed out")
|
||||
}
|
||||
27  integration/tailscale.go  Normal file
@@ -0,0 +1,27 @@
package integration
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"net/url"
|
||||
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
)
|
||||
|
||||
// nolint
|
||||
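// TailscaleClient is the interface the integration tests use to drive a
// Tailscale node, independent of how it is actually run (currently a Docker
// container via the tsic package).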
type TailscaleClient interface {
|
||||
Hostname() string
|
||||
Shutdown() error
|
||||
Version() string
|
||||
Execute(command []string) (string, string, error)
|
||||
Up(loginServer, authKey string) error
|
||||
UpWithLoginURL(loginServer string) (*url.URL, error)
|
||||
Logout() error
|
||||
IPs() ([]netip.Addr, error)
|
||||
FQDN() (string, error)
|
||||
Status() (*ipnstate.Status, error)
|
||||
WaitForReady() error
|
||||
WaitForLogout() error
|
||||
WaitForPeers(expected int) error
|
||||
Ping(hostnameOrIP string) error
|
||||
ID() string
|
||||
}
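As a rough usage sketch only (not part of the diff; the login server URL, pre-auth key and peer name below are placeholder values, and it assumes "fmt" is imported), a helper built on this interface might look like:

// Sketch only: drive a single TailscaleClient through login, readiness and a ping.
func exampleClientUsage(client TailscaleClient) error {
	// Join a headscale instance; both arguments are placeholders.
	if err := client.Up("https://headscale.example.com", "example-preauth-key"); err != nil {
		return err
	}

	// Wait until the node reports an active tailnet.
	if err := client.WaitForReady(); err != nil {
		return err
	}

	// A joined node is expected to expose one IPv4 and one IPv6 address.
	ips, err := client.IPs()
	if err != nil {
		return err
	}
	if len(ips) != 2 {
		return fmt.Errorf("expected 2 tailscale addresses, got %d", len(ips))
	}

	// Placeholder peer name; Ping accepts a hostname or an IP.
	return client.Ping("some-peer-hostname")
}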
470  integration/tsic/tsic.go  Normal file
@@ -0,0 +1,470 @@
package tsic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
)
|
||||
|
||||
const (
|
||||
tsicHashLength = 6
|
||||
dockerContextPath = "../."
|
||||
headscaleCertPath = "/usr/local/share/ca-certificates/headscale.crt"
|
||||
)
|
||||
|
||||
var (
|
||||
errTailscalePingFailed = errors.New("ping failed")
|
||||
errTailscaleNotLoggedIn = errors.New("tailscale not logged in")
|
||||
errTailscaleWrongPeerCount = errors.New("wrong peer count")
|
||||
errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
|
||||
errTailscaleNotConnected = errors.New("tailscale not connected")
|
||||
errTailscaleNotLoggedOut = errors.New("tailscale not logged out")
|
||||
)
|
||||
|
||||
type TailscaleInContainer struct {
|
||||
version string
|
||||
hostname string
|
||||
|
||||
pool *dockertest.Pool
|
||||
container *dockertest.Resource
|
||||
network *dockertest.Network
|
||||
|
||||
// "cache"
|
||||
ips []netip.Addr
|
||||
fqdn string
|
||||
|
||||
// optional config
|
||||
headscaleCert []byte
|
||||
headscaleHostname string
|
||||
withSSH bool
|
||||
}
|
||||
|
||||
type Option = func(c *TailscaleInContainer)
|
||||
|
||||
func WithHeadscaleTLS(cert []byte) Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
tsic.headscaleCert = cert
|
||||
}
|
||||
}
|
||||
|
||||
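// WithOrCreateNetwork attaches the client to the given Docker network, or, if
// nil is passed, looks up or creates a network named "<hostname>-network".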
func WithOrCreateNetwork(network *dockertest.Network) Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
if network != nil {
|
||||
tsic.network = network
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
network, err := dockertestutil.GetFirstOrCreateNetwork(
|
||||
tsic.pool,
|
||||
fmt.Sprintf("%s-network", tsic.hostname),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create network: %s", err)
|
||||
}
|
||||
|
||||
tsic.network = network
|
||||
}
|
||||
}
|
||||
|
||||
func WithHeadscaleName(hsName string) Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
tsic.headscaleHostname = hsName
|
||||
}
|
||||
}
|
||||
|
||||
func WithSSH() Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
tsic.withSSH = true
|
||||
}
|
||||
}
|
||||
|
||||
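// New builds the Tailscale image for the requested version, starts it as a
// container on the given Docker network, applies any Options, and writes the
// headscale TLS certificate into the container when one was provided.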
func New(
|
||||
pool *dockertest.Pool,
|
||||
version string,
|
||||
network *dockertest.Network,
|
||||
opts ...Option,
|
||||
) (*TailscaleInContainer, error) {
|
||||
hash, err := headscale.GenerateRandomStringDNSSafe(tsicHashLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostname := fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
|
||||
|
||||
tsic := &TailscaleInContainer{
|
||||
version: version,
|
||||
hostname: hostname,
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(tsic)
|
||||
}
|
||||
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Networks: []*dockertest.Network{network},
|
||||
// Cmd: []string{
|
||||
// "tailscaled", "--tun=tsdev",
|
||||
// },
|
||||
Entrypoint: []string{
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"/bin/sleep 3 ; update-ca-certificates ; tailscaled --tun=tsdev",
|
||||
},
|
||||
}
|
||||
|
||||
if tsic.headscaleHostname != "" {
|
||||
tailscaleOptions.ExtraHosts = []string{
|
||||
"host.docker.internal:host-gateway",
|
||||
fmt.Sprintf("%s:host-gateway", tsic.headscaleHostname),
|
||||
}
|
||||
}
|
||||
|
||||
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
|
||||
err = pool.RemoveContainerByName(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
container, err := pool.BuildAndRunWithBuildOptions(
|
||||
createTailscaleBuildOptions(version),
|
||||
tailscaleOptions,
|
||||
dockertestutil.DockerRestartPolicy,
|
||||
dockertestutil.DockerAllowLocalIPv6,
|
||||
dockertestutil.DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not start tailscale container: %w", err)
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
tsic.container = container
|
||||
|
||||
if tsic.hasTLS() {
|
||||
err = tsic.WriteFile(headscaleCertPath, tsic.headscaleCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tsic, nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) hasTLS() bool {
|
||||
return len(t.headscaleCert) != 0
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Shutdown() error {
|
||||
return t.pool.Purge(t.container)
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Hostname() string {
|
||||
return t.hostname
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Version() string {
|
||||
return t.version
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) ID() string {
|
||||
return t.container.Container.ID
|
||||
}
|
||||
|
||||
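// Execute runs a command inside the Tailscale container and returns its
// stdout and stderr. A stderr mentioning "NeedsLogin" is translated into
// errTailscaleNotLoggedIn.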
func (t *TailscaleInContainer) Execute(
|
||||
command []string,
|
||||
) (string, string, error) {
|
||||
stdout, stderr, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
log.Printf("command stderr: %s\n", stderr)
|
||||
|
||||
if stdout != "" {
|
||||
log.Printf("command stdout: %s\n", stdout)
|
||||
}
|
||||
|
||||
if strings.Contains(stderr, "NeedsLogin") {
|
||||
return stdout, stderr, errTailscaleNotLoggedIn
|
||||
}
|
||||
|
||||
return stdout, stderr, err
|
||||
}
|
||||
|
||||
return stdout, stderr, nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Up(
|
||||
loginServer, authKey string,
|
||||
) error {
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"up",
|
||||
"-login-server",
|
||||
loginServer,
|
||||
"--authkey",
|
||||
authKey,
|
||||
"--hostname",
|
||||
t.hostname,
|
||||
}
|
||||
|
||||
if t.withSSH {
|
||||
command = append(command, "--ssh")
|
||||
}
|
||||
|
||||
if _, _, err := t.Execute(command); err != nil {
|
||||
return fmt.Errorf("failed to join tailscale client: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
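// UpWithLoginURL runs `tailscale up` without an auth key and parses the
// interactive login URL that tailscale prints to stderr.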
func (t *TailscaleInContainer) UpWithLoginURL(
|
||||
loginServer string,
|
||||
) (*url.URL, error) {
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"up",
|
||||
"-login-server",
|
||||
loginServer,
|
||||
"--hostname",
|
||||
t.hostname,
|
||||
}
|
||||
|
||||
_, stderr, err := t.Execute(command)
|
||||
if errors.Is(err, errTailscaleNotLoggedIn) {
|
||||
return nil, errTailscaleCannotUpWithoutAuthkey
|
||||
}
|
||||
|
||||
urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
|
||||
urlStr = strings.TrimSpace(urlStr)
|
||||
|
||||
// parse URL
|
||||
loginURL, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Printf("Could not parse login URL: %s", err)
|
||||
log.Printf("Original join command result: %s", stderr)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return loginURL, nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Logout() error {
|
||||
_, _, err := t.Execute([]string{"tailscale", "logout"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
|
||||
if t.ips != nil && len(t.ips) != 0 {
|
||||
return t.ips, nil
|
||||
}
|
||||
|
||||
ips := make([]netip.Addr, 0)
|
||||
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"ip",
|
||||
}
|
||||
|
||||
result, _, err := t.Execute(command)
|
||||
if err != nil {
|
||||
return []netip.Addr{}, fmt.Errorf("failed to join tailscale client: %w", err)
|
||||
}
|
||||
|
||||
for _, address := range strings.Split(result, "\n") {
|
||||
address = strings.TrimSuffix(address, "\n")
|
||||
if len(address) < 1 {
|
||||
continue
|
||||
}
|
||||
ip, err := netip.ParseAddr(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips = append(ips, ip)
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"status",
|
||||
"--json",
|
||||
}
|
||||
|
||||
result, _, err := t.Execute(command)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute tailscale status command: %w", err)
|
||||
}
|
||||
|
||||
var status ipnstate.Status
|
||||
err = json.Unmarshal([]byte(result), &status)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err)
|
||||
}
|
||||
|
||||
return &status, err
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) FQDN() (string, error) {
|
||||
if t.fqdn != "" {
|
||||
return t.fqdn, nil
|
||||
}
|
||||
|
||||
status, err := t.Status()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get FQDN: %w", err)
|
||||
}
|
||||
|
||||
return status.Self.DNSName, nil
|
||||
}
|
||||
|
||||
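// WaitForReady polls `tailscale status` until the node reports an active
// tailnet.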
func (t *TailscaleInContainer) WaitForReady() error {
|
||||
return t.pool.Retry(func() error {
|
||||
status, err := t.Status()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch tailscale status: %w", err)
|
||||
}
|
||||
|
||||
if status.CurrentTailnet != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errTailscaleNotConnected
|
||||
})
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) WaitForLogout() error {
|
||||
return t.pool.Retry(func() error {
|
||||
status, err := t.Status()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch tailscale status: %w", err)
|
||||
}
|
||||
|
||||
if status.CurrentTailnet == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errTailscaleNotLoggedOut
|
||||
})
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) WaitForPeers(expected int) error {
|
||||
return t.pool.Retry(func() error {
|
||||
status, err := t.Status()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch tailscale status: %w", err)
|
||||
}
|
||||
|
||||
if peers := status.Peers(); len(peers) != expected {
|
||||
return errTailscaleWrongPeerCount
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// TODO(kradalby): Make multiping, go routine magic.
|
||||
func (t *TailscaleInContainer) Ping(hostnameOrIP string) error {
|
||||
return t.pool.Retry(func() error {
|
||||
command := []string{
|
||||
"tailscale", "ping",
|
||||
"--timeout=1s",
|
||||
"--c=10",
|
||||
"--until-direct=true",
|
||||
hostnameOrIP,
|
||||
}
|
||||
|
||||
result, _, err := t.Execute(command)
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"failed to run ping command from %s to %s, err: %s",
|
||||
t.Hostname(),
|
||||
hostnameOrIP,
|
||||
err,
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.Contains(result, "pong") && !strings.Contains(result, "is local") {
|
||||
return backoff.Permanent(errTailscalePingFailed)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) WriteFile(path string, data []byte) error {
|
||||
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
|
||||
}
|
||||
|
||||
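// createTailscaleBuildOptions maps a version string to docker build options:
// "head" builds Dockerfile.tailscale-HEAD, "unstable" installs the latest
// package from the unstable channel, and anything else pins that version from
// the stable channel.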
func createTailscaleBuildOptions(version string) *dockertest.BuildOptions {
|
||||
var tailscaleBuildOptions *dockertest.BuildOptions
|
||||
switch version {
|
||||
case "head":
|
||||
tailscaleBuildOptions = &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.tailscale-HEAD",
|
||||
ContextDir: dockerContextPath,
|
||||
BuildArgs: []docker.BuildArg{},
|
||||
}
|
||||
case "unstable":
|
||||
tailscaleBuildOptions = &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.tailscale",
|
||||
ContextDir: dockerContextPath,
|
||||
BuildArgs: []docker.BuildArg{
|
||||
{
|
||||
Name: "TAILSCALE_VERSION",
|
||||
Value: "*", // Installs the latest version https://askubuntu.com/a/824926
|
||||
},
|
||||
{
|
||||
Name: "TAILSCALE_CHANNEL",
|
||||
Value: "unstable",
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
tailscaleBuildOptions = &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.tailscale",
|
||||
ContextDir: dockerContextPath,
|
||||
BuildArgs: []docker.BuildArg{
|
||||
{
|
||||
Name: "TAILSCALE_VERSION",
|
||||
Value: version,
|
||||
},
|
||||
{
|
||||
Name: "TAILSCALE_CHANNEL",
|
||||
Value: "stable",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return tailscaleBuildOptions
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build integration_cli
|
||||
|
||||
// nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
@@ -13,6 +12,7 @@ import (
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
@@ -27,7 +27,11 @@ type IntegrationCLITestSuite struct {
|
||||
env []string
|
||||
}
|
||||
|
||||
func TestCLIIntegrationTestSuite(t *testing.T) {
|
||||
func TestIntegrationCLITestSuite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration tests due to short flag")
|
||||
}
|
||||
|
||||
s := new(IntegrationCLITestSuite)
|
||||
|
||||
suite.Run(t, s)
|
||||
@@ -42,11 +46,11 @@ func (s *IntegrationCLITestSuite) SetupTest() {
|
||||
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
|
||||
}
|
||||
|
||||
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
|
||||
s.network = *pnetwork
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
|
||||
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
|
||||
}
|
||||
s.network = network
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile",
|
||||
@@ -63,8 +67,12 @@ func (s *IntegrationCLITestSuite) SetupTest() {
|
||||
Mounts: []string{
|
||||
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
|
||||
},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
ExposedPorts: []string{"8080/tcp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8080/tcp": {{HostPort: "8080"}},
|
||||
},
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(headscaleHostname)
|
||||
@@ -87,7 +95,9 @@ func (s *IntegrationCLITestSuite) SetupTest() {
|
||||
fmt.Println("Created headscale container for CLI tests")
|
||||
|
||||
fmt.Println("Waiting for headscale to be ready for CLI tests")
|
||||
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))
|
||||
hostEndpoint := fmt.Sprintf("%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("8080/tcp"))
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("http://%s/health", hostEndpoint)
|
||||
@@ -129,7 +139,7 @@ func (s *IntegrationCLITestSuite) HandleStats(
|
||||
}
|
||||
|
||||
func (s *IntegrationCLITestSuite) createNamespace(name string) (*v1.Namespace, error) {
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -172,7 +182,7 @@ func (s *IntegrationCLITestSuite) TestNamespaceCommand() {
|
||||
assert.Equal(s.T(), names[2], namespaces[2].Name)
|
||||
|
||||
// Test list namespaces
|
||||
listResult, err := ExecuteCommand(
|
||||
listResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -194,7 +204,7 @@ func (s *IntegrationCLITestSuite) TestNamespaceCommand() {
|
||||
assert.Equal(s.T(), names[2], listedNamespaces[2].Name)
|
||||
|
||||
// Test rename namespace
|
||||
renameResult, err := ExecuteCommand(
|
||||
renameResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -216,7 +226,7 @@ func (s *IntegrationCLITestSuite) TestNamespaceCommand() {
|
||||
assert.Equal(s.T(), renamedNamespace.Name, "newname")
|
||||
|
||||
// Test list after rename namespaces
|
||||
listAfterRenameResult, err := ExecuteCommand(
|
||||
listAfterRenameResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -247,7 +257,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
preAuthResult, err := ExecuteCommand(
|
||||
preAuthResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -260,6 +270,8 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
"--tags",
|
||||
"tag:test1,tag:test2",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
@@ -275,7 +287,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
|
||||
assert.Len(s.T(), keys, 5)
|
||||
|
||||
// Test list of keys
|
||||
listResult, err := ExecuteCommand(
|
||||
listResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -333,9 +345,14 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
|
||||
listedPreAuthKeys[4].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
|
||||
)
|
||||
|
||||
// Test that tags are present
|
||||
for i := 0; i < count; i++ {
|
||||
assert.Equal(s.T(), listedPreAuthKeys[i].AclTags, []string{"tag:test1", "tag:test2"})
|
||||
}
|
||||
|
||||
// Expire three keys
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -351,7 +368,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
|
||||
}
|
||||
|
||||
// Test list pre auth keys after expire
|
||||
listAfterExpireResult, err := ExecuteCommand(
|
||||
listAfterExpireResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -396,7 +413,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandWithoutExpiry() {
|
||||
namespace, err := s.createNamespace("pre-auth-key-without-exp-namespace")
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
preAuthResult, err := ExecuteCommand(
|
||||
preAuthResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -417,7 +434,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandWithoutExpiry() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
// Test list of keys
|
||||
listResult, err := ExecuteCommand(
|
||||
listResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -449,7 +466,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandReusableEphemeral() {
|
||||
namespace, err := s.createNamespace("pre-auth-key-reus-ephm-namespace")
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
preAuthReusableResult, err := ExecuteCommand(
|
||||
preAuthReusableResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -472,7 +489,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandReusableEphemeral() {
|
||||
assert.True(s.T(), preAuthReusableKey.GetReusable())
|
||||
assert.False(s.T(), preAuthReusableKey.GetEphemeral())
|
||||
|
||||
preAuthEphemeralResult, err := ExecuteCommand(
|
||||
preAuthEphemeralResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -514,7 +531,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandReusableEphemeral() {
|
||||
// assert.NotNil(s.T(), err)
|
||||
|
||||
// Test list of keys
|
||||
listResult, err := ExecuteCommand(
|
||||
listResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -541,14 +558,14 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineKeys := []string{
|
||||
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
}
|
||||
machines := make([]*v1.Machine, len(machineKeys))
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for index, machineKey := range machineKeys {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -567,7 +584,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
machineResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -592,7 +609,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
|
||||
}
|
||||
assert.Len(s.T(), machines, len(machineKeys))
|
||||
|
||||
addTagResult, err := ExecuteCommand(
|
||||
addTagResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -612,7 +629,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
|
||||
assert.Equal(s.T(), []string{"tag:test"}, machine.ForcedTags)
|
||||
|
||||
// try to set a wrong tag and retrieve the error
|
||||
wrongTagResult, err := ExecuteCommand(
|
||||
wrongTagResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -634,7 +651,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
|
||||
assert.Contains(s.T(), errorOutput.Error, "tag must start with the string 'tag:'")
|
||||
|
||||
// Test list all nodes after added seconds
|
||||
listAllResult, err := ExecuteCommand(
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -674,17 +691,17 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
|
||||
// Randomly generated machine keys
|
||||
machineKeys := []string{
|
||||
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
"8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
|
||||
"cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
|
||||
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
}
|
||||
machines := make([]*v1.Machine, len(machineKeys))
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for index, machineKey := range machineKeys {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -703,7 +720,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
machineResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -730,7 +747,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
assert.Len(s.T(), machines, len(machineKeys))
|
||||
|
||||
// Test list all nodes after added seconds
|
||||
listAllResult, err := ExecuteCommand(
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -762,14 +779,14 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
assert.Equal(s.T(), "machine-5", listAll[4].Name)
|
||||
|
||||
otherNamespaceMachineKeys := []string{
|
||||
"b5b444774186d4217adcec407563a1223929465ee2c68a4da13af0d0185b4f8e",
|
||||
"dc721977ac7415aafa87f7d4574cbe07c6b171834a6d37375782bdc1fb6b3584",
|
||||
"nodekey:b5b444774186d4217adcec407563a1223929465ee2c68a4da13af0d0185b4f8e",
|
||||
"nodekey:dc721977ac7415aafa87f7d4574cbe07c6b171834a6d37375782bdc1fb6b3584",
|
||||
}
|
||||
otherNamespaceMachines := make([]*v1.Machine, len(otherNamespaceMachineKeys))
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for index, machineKey := range otherNamespaceMachineKeys {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -788,7 +805,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
machineResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -815,7 +832,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
assert.Len(s.T(), otherNamespaceMachines, len(otherNamespaceMachineKeys))
|
||||
|
||||
// Test list all nodes after added otherNamespace
|
||||
listAllWithotherNamespaceResult, err := ExecuteCommand(
|
||||
listAllWithotherNamespaceResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -845,7 +862,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
assert.Equal(s.T(), "otherNamespace-machine-2", listAllWithotherNamespace[6].Name)
|
||||
|
||||
// Test list all nodes after added otherNamespace
|
||||
listOnlyotherNamespaceMachineNamespaceResult, err := ExecuteCommand(
|
||||
listOnlyotherNamespaceMachineNamespaceResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -884,7 +901,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
)
|
||||
|
||||
// Delete a machines
|
||||
_, err = ExecuteCommand(
|
||||
_, _, err = ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -902,7 +919,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
// Test: list main namespace after machine is deleted
|
||||
listOnlyMachineNamespaceAfterDeleteResult, err := ExecuteCommand(
|
||||
listOnlyMachineNamespaceAfterDeleteResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -933,17 +950,17 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
|
||||
|
||||
// Randomly generated machine keys
|
||||
machineKeys := []string{
|
||||
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
"8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
|
||||
"cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
|
||||
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
}
|
||||
machines := make([]*v1.Machine, len(machineKeys))
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for index, machineKey := range machineKeys {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -962,7 +979,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
machineResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -988,7 +1005,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
|
||||
|
||||
assert.Len(s.T(), machines, len(machineKeys))
|
||||
|
||||
listAllResult, err := ExecuteCommand(
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1014,7 +1031,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
|
||||
assert.True(s.T(), listAll[4].Expiry.AsTime().IsZero())
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1028,7 +1045,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
}
|
||||
|
||||
listAllAfterExpiryResult, err := ExecuteCommand(
|
||||
listAllAfterExpiryResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1060,17 +1077,17 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
|
||||
// Randomly generated machine keys
|
||||
machineKeys := []string{
|
||||
"cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
"8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
|
||||
"f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
|
||||
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
}
|
||||
machines := make([]*v1.Machine, len(machineKeys))
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for index, machineKey := range machineKeys {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1089,7 +1106,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
machineResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1115,7 +1132,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
|
||||
assert.Len(s.T(), machines, len(machineKeys))
|
||||
|
||||
listAllResult, err := ExecuteCommand(
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1141,7 +1158,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
assert.Contains(s.T(), listAll[4].GetGivenName(), "machine-5")
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1156,7 +1173,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
}
|
||||
|
||||
listAllAfterRenameResult, err := ExecuteCommand(
|
||||
listAllAfterRenameResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1182,7 +1199,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
assert.Contains(s.T(), listAllAfterRename[4].GetGivenName(), "machine-5")
|
||||
|
||||
// Test failure for too long names
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1197,7 +1214,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
assert.Contains(s.T(), result, "not be over 63 chars")
|
||||
|
||||
listAllAfterRenameAttemptResult, err := ExecuteCommand(
|
||||
listAllAfterRenameAttemptResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1226,206 +1243,13 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
|
||||
assert.Contains(s.T(), listAllAfterRenameAttempt[4].GetGivenName(), "machine-5")
|
||||
}
|
||||
|
||||
func (s *IntegrationCLITestSuite) TestRouteCommand() {
|
||||
namespace, err := s.createNamespace("routes-namespace")
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
// Randomly generated machine keys
|
||||
machineKey := "9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe"
|
||||
|
||||
_, err = ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"debug",
|
||||
"create-node",
|
||||
"--name",
|
||||
"route-machine",
|
||||
"--namespace",
|
||||
namespace.Name,
|
||||
"--key",
|
||||
machineKey,
|
||||
"--route",
|
||||
"10.0.0.0/8",
|
||||
"--route",
|
||||
"192.168.1.0/24",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"nodes",
|
||||
"--namespace",
|
||||
namespace.Name,
|
||||
"register",
|
||||
"--key",
|
||||
machineKey,
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var machine v1.Machine
|
||||
err = json.Unmarshal([]byte(machineResult), &machine)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.Equal(s.T(), uint64(1), machine.Id)
|
||||
assert.Equal(s.T(), "route-machine", machine.Name)
|
||||
|
||||
listAllResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
"--identifier",
|
||||
"0",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var listAll v1.Routes
|
||||
err = json.Unmarshal([]byte(listAllResult), &listAll)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.Len(s.T(), listAll.AdvertisedRoutes, 2)
|
||||
assert.Contains(s.T(), listAll.AdvertisedRoutes, "10.0.0.0/8")
|
||||
assert.Contains(s.T(), listAll.AdvertisedRoutes, "192.168.1.0/24")
|
||||
|
||||
assert.Empty(s.T(), listAll.EnabledRoutes)
|
||||
|
||||
enableTwoRoutesResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"enable",
|
||||
"--output",
|
||||
"json",
|
||||
"--identifier",
|
||||
"0",
|
||||
"--route",
|
||||
"10.0.0.0/8",
|
||||
"--route",
|
||||
"192.168.1.0/24",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var enableTwoRoutes v1.Routes
|
||||
err = json.Unmarshal([]byte(enableTwoRoutesResult), &enableTwoRoutes)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.Len(s.T(), enableTwoRoutes.AdvertisedRoutes, 2)
|
||||
assert.Contains(s.T(), enableTwoRoutes.AdvertisedRoutes, "10.0.0.0/8")
|
||||
assert.Contains(s.T(), enableTwoRoutes.AdvertisedRoutes, "192.168.1.0/24")
|
||||
|
||||
assert.Len(s.T(), enableTwoRoutes.EnabledRoutes, 2)
|
||||
assert.Contains(s.T(), enableTwoRoutes.EnabledRoutes, "10.0.0.0/8")
|
||||
assert.Contains(s.T(), enableTwoRoutes.EnabledRoutes, "192.168.1.0/24")
|
||||
|
||||
// Enable only one route, effectively disabling one of the routes
|
||||
enableOneRouteResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"enable",
|
||||
"--output",
|
||||
"json",
|
||||
"--identifier",
|
||||
"0",
|
||||
"--route",
|
||||
"10.0.0.0/8",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var enableOneRoute v1.Routes
|
||||
err = json.Unmarshal([]byte(enableOneRouteResult), &enableOneRoute)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.Len(s.T(), enableOneRoute.AdvertisedRoutes, 2)
|
||||
assert.Contains(s.T(), enableOneRoute.AdvertisedRoutes, "10.0.0.0/8")
|
||||
assert.Contains(s.T(), enableOneRoute.AdvertisedRoutes, "192.168.1.0/24")
|
||||
|
||||
assert.Len(s.T(), enableOneRoute.EnabledRoutes, 1)
|
||||
assert.Contains(s.T(), enableOneRoute.EnabledRoutes, "10.0.0.0/8")
|
||||
|
||||
// Enable only one route, effectively disabling one of the routes
|
||||
failEnableNonAdvertisedRoute, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"enable",
|
||||
"--output",
|
||||
"json",
|
||||
"--identifier",
|
||||
"0",
|
||||
"--route",
|
||||
"11.0.0.0/8",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.Contains(
|
||||
s.T(),
|
||||
string(failEnableNonAdvertisedRoute),
|
||||
"route (route-machine) is not available on node",
|
||||
)
|
||||
|
||||
// Enable all routes on host
|
||||
enableAllRouteResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"routes",
|
||||
"enable",
|
||||
"--output",
|
||||
"json",
|
||||
"--identifier",
|
||||
"0",
|
||||
"--all",
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var enableAllRoute v1.Routes
|
||||
err = json.Unmarshal([]byte(enableAllRouteResult), &enableAllRoute)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.Len(s.T(), enableAllRoute.AdvertisedRoutes, 2)
|
||||
assert.Contains(s.T(), enableAllRoute.AdvertisedRoutes, "10.0.0.0/8")
|
||||
assert.Contains(s.T(), enableAllRoute.AdvertisedRoutes, "192.168.1.0/24")
|
||||
|
||||
assert.Len(s.T(), enableAllRoute.EnabledRoutes, 2)
|
||||
assert.Contains(s.T(), enableAllRoute.EnabledRoutes, "10.0.0.0/8")
|
||||
assert.Contains(s.T(), enableAllRoute.EnabledRoutes, "192.168.1.0/24")
|
||||
}
|
||||
|
||||
func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
|
||||
count := 5
|
||||
|
||||
keys := make([]string, count)
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
apiResult, err := ExecuteCommand(
|
||||
apiResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1451,7 +1275,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
|
||||
assert.Len(s.T(), keys, 5)
|
||||
|
||||
// Test list of keys
|
||||
listResult, err := ExecuteCommand(
|
||||
listResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1513,7 +1337,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
|
||||
|
||||
// Expire three keys
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1530,7 +1354,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
|
||||
}
|
||||
|
||||
// Test list pre auth keys after expire
|
||||
listAfterExpireResult, err := ExecuteCommand(
|
||||
listAfterExpireResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1571,9 +1395,9 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
// Randomly generated machine key
|
||||
machineKey := "688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa"
|
||||
machineKey := "nodekey:688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa"
|
||||
|
||||
_, err = ExecuteCommand(
|
||||
_, _, err = ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1592,7 +1416,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
machineResult, err := ExecuteCommand(
|
||||
machineResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1619,7 +1443,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
|
||||
machineId := fmt.Sprintf("%d", machine.Id)
|
||||
|
||||
moveToNewNSResult, err := ExecuteCommand(
|
||||
moveToNewNSResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1641,7 +1465,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
|
||||
assert.Equal(s.T(), machine.Namespace, newNamespace)
|
||||
|
||||
listAllNodesResult, err := ExecuteCommand(
|
||||
listAllNodesResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1664,7 +1488,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
assert.Equal(s.T(), allNodes[0].Namespace, machine.Namespace)
|
||||
assert.Equal(s.T(), allNodes[0].Namespace, newNamespace)
|
||||
|
||||
moveToNonExistingNSResult, err := ExecuteCommand(
|
||||
moveToNonExistingNSResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1688,7 +1512,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
)
|
||||
assert.Equal(s.T(), machine.Namespace, newNamespace)
|
||||
|
||||
moveToOldNSResult, err := ExecuteCommand(
|
||||
moveToOldNSResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1710,7 +1534,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
|
||||
|
||||
assert.Equal(s.T(), machine.Namespace, oldNamespace)
|
||||
|
||||
moveToSameNSResult, err := ExecuteCommand(
|
||||
moveToSameNSResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1739,8 +1563,10 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
altConfig, err := os.ReadFile("integration_test/etc/alt-config.dump.gold.yaml")
|
||||
assert.Nil(s.T(), err)
|
||||
altEnvConfig, err := os.ReadFile("integration_test/etc/alt-env-config.dump.gold.yaml")
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
_, err = ExecuteCommand(
|
||||
_, _, err = ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1755,7 +1581,7 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
|
||||
|
||||
assert.YAMLEq(s.T(), string(defaultConfig), string(defaultDumpConfig))
|
||||
|
||||
_, err = ExecuteCommand(
|
||||
_, _, err = ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -1771,4 +1597,40 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
assert.YAMLEq(s.T(), string(altConfig), string(altDumpConfig))

	_, _, err = ExecuteCommand(
		&s.headscale,
		[]string{
			"headscale",
			"dumpConfig",
		},
		[]string{
			"HEADSCALE_CONFIG=/etc/headscale/alt-env-config.yaml",
		},
	)
	assert.Nil(s.T(), err)

	altEnvDumpConfig, err := os.ReadFile("integration_test/etc/config.dump.yaml")
	assert.Nil(s.T(), err)

	assert.YAMLEq(s.T(), string(altEnvConfig), string(altEnvDumpConfig))

	_, _, err = ExecuteCommand(
		&s.headscale,
		[]string{
			"headscale",
			"-c",
			"/etc/headscale/alt-config.yaml",
			"dumpConfig",
		},
		[]string{
			"HEADSCALE_CONFIG=/etc/headscale/alt-env-config.yaml",
		},
	)
	assert.Nil(s.T(), err)

	altDumpConfig, err = os.ReadFile("integration_test/etc/config.dump.yaml")
	assert.Nil(s.T(), err)

	assert.YAMLEq(s.T(), string(altConfig), string(altDumpConfig))
}

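The final block above is a precedence check: with both the -c flag and the HEADSCALE_CONFIG environment variable set, the dumped configuration is asserted to match alt-config.dump.gold.yaml, so the command-line flag is expected to take priority over the environment variable.
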
@@ -1,5 +1,4 @@
//go:build integration

//nolint
package headscale

import (
@@ -7,6 +6,7 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"net/netip"
	"os"
	"strconv"
	"strings"
@@ -15,23 +15,26 @@ import (
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
	"inet.af/netaddr"
)

const (
	headscaleHostname = "headscale-derp"
	headscaleNetwork = "headscale-test"
	headscaleHostname = "headscale"
	DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
)

var (
	errEnvVarEmpty = errors.New("getenv: environment variable empty")

	IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
	IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
	IpPrefix4 = netip.MustParsePrefix("100.64.0.0/10")
	IpPrefix6 = netip.MustParsePrefix("fd7a:115c:a1e0::/48")

	tailscaleVersions = []string{
		// "head",
		// "unstable",
		"head",
		"unstable",
		"1.34.0",
		"1.32.3",
		"1.30.2",
		"1.28.0",
		"1.26.2",
		"1.24.2",

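The prefix globals above move from inet.af/netaddr to the standard library's net/netip. A minimal, self-contained sketch of the equivalent parse-and-contains calls (the address value is illustrative only, not taken from the tests):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Same prefixes as the test globals, parsed with the stdlib types.
	v4 := netip.MustParsePrefix("100.64.0.0/10")
	v6 := netip.MustParsePrefix("fd7a:115c:a1e0::/48")

	// netip.ParseAddr replaces netaddr.ParseIP for single addresses.
	ip, err := netip.ParseAddr("100.64.0.1")
	if err != nil {
		panic(err)
	}

	fmt.Println(v4.Contains(ip), v6.Contains(ip)) // true false
}
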
@@ -67,7 +70,7 @@ func ExecuteCommand(
	cmd []string,
	env []string,
	options ...ExecuteCommandOption,
) (string, error) {
) (string, string, error) {
	var stdout bytes.Buffer
	var stderr bytes.Buffer

@@ -77,7 +80,7 @@ func ExecuteCommand(

	for _, opt := range options {
		if err := opt(&execConfig); err != nil {
			return "", fmt.Errorf("execute-command/options: %w", err)
			return "", "", fmt.Errorf("execute-command/options: %w", err)
		}
	}

@@ -106,7 +109,7 @@ func ExecuteCommand(
	select {
	case res := <-resultChan:
		if res.err != nil {
			return "", res.err
			return stdout.String(), stderr.String(), res.err
		}

		if res.exitCode != 0 {
@@ -114,13 +117,19 @@ func ExecuteCommand(
			fmt.Println("stdout: ", stdout.String())
			fmt.Println("stderr: ", stderr.String())

			return "", fmt.Errorf("command failed with: %s", stderr.String())
			return stdout.String(), stderr.String(), fmt.Errorf(
				"command failed with: %s",
				stderr.String(),
			)
		}

		return stdout.String(), nil
		return stdout.String(), stderr.String(), nil
	case <-time.After(execConfig.timeout):

		return "", fmt.Errorf("command timed out after %s", execConfig.timeout)
		return stdout.String(), stderr.String(), fmt.Errorf(
			"command timed out after %s",
			execConfig.timeout,
		)
	}
}

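With the widened return value, call sites now receive stdout and stderr even when the command fails or times out. A rough illustration of a caller inside a suite method under the new signature (the namespace name and timeout value are made up for the example; ExecuteCommandTimeout is the option used elsewhere in these tests):

	// Illustrative call under the new (stdout, stderr, error) signature.
	stdout, stderr, err := ExecuteCommand(
		&s.headscale,
		[]string{"headscale", "namespaces", "create", "example-namespace"},
		[]string{},
		ExecuteCommandTimeout(30*time.Second), // per-call timeout option
	)
	if err != nil {
		s.T().Fatalf("command failed: %s\nstderr: %s", err, stderr)
	}
	log.Println("create namespace output:", stdout)
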
@@ -194,12 +203,12 @@ func getDockerBuildOptions(version string) *dockertest.BuildOptions {
|
||||
|
||||
func getIPs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[string][]netaddr.IP, error) {
|
||||
ips := make(map[string][]netaddr.IP)
|
||||
) (map[string][]netip.Addr, error) {
|
||||
ips := make(map[string][]netip.Addr)
|
||||
for hostname, tailscale := range tailscales {
|
||||
command := []string{"tailscale", "ip"}
|
||||
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
@@ -213,7 +222,7 @@ func getIPs(
|
||||
if len(address) < 1 {
|
||||
continue
|
||||
}
|
||||
ip, err := netaddr.ParseIP(address)
|
||||
ip, err := netip.ParseAddr(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -227,7 +236,7 @@ func getIPs(
|
||||
func getDNSNames(
|
||||
headscale *dockertest.Resource,
|
||||
) ([]string, error) {
|
||||
listAllResult, err := ExecuteCommand(
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -260,7 +269,7 @@ func getDNSNames(
|
||||
func getMagicFQDN(
|
||||
headscale *dockertest.Resource,
|
||||
) ([]string, error) {
|
||||
listAllResult, err := ExecuteCommand(
|
||||
listAllResult, _, err := ExecuteCommand(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -315,3 +324,21 @@ func GetEnvBool(key string) (bool, error) {

	return v, nil
}

func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (dockertest.Network, error) {
	networks, err := pool.NetworksByName(name)
	if err != nil || len(networks) == 0 {
		if _, err := pool.CreateNetwork(name); err == nil {
			// Create does not give us an updated version of the resource, so we need to
			// get it again.
			networks, err := pool.NetworksByName(name)
			if err != nil {
				return dockertest.Network{}, err
			}

			return networks[0], nil
		}
	}

	return networks[0], nil
}

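This helper makes network setup idempotent: looking the network up by name first means repeated suite runs reuse an existing Docker network instead of failing on creation. For reference, the call pattern the DERP suite's SetupSuite uses with it (the same lines appear in a hunk further down) is roughly:

	network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
	if err != nil {
		s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
	}
	s.network = network
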
@@ -1,5 +1,4 @@
|
||||
//go:build integration_derp
|
||||
|
||||
//nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
@@ -17,34 +16,39 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ccding/go-stun/stun"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/ccding/go-stun/stun"
|
||||
)
|
||||
|
||||
const (
|
||||
namespaceName = "derpnamespace"
|
||||
totalContainers = 3
|
||||
headscaleDerpHostname = "headscale-derp"
|
||||
namespaceName = "derpnamespace"
|
||||
totalContainers = 3
|
||||
)
|
||||
|
||||
type IntegrationDERPTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
networks map[int]dockertest.Network // so we keep the containers isolated
|
||||
headscale dockertest.Resource
|
||||
saveLogs bool
|
||||
pool dockertest.Pool
|
||||
network dockertest.Network
|
||||
containerNetworks map[int]dockertest.Network // so we keep the containers isolated
|
||||
headscale dockertest.Resource
|
||||
saveLogs bool
|
||||
|
||||
tailscales map[string]dockertest.Resource
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestDERPIntegrationTestSuite(t *testing.T) {
|
||||
func TestIntegrationDERPTestSuite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration tests due to short flag")
|
||||
}
|
||||
|
||||
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
|
||||
if err != nil {
|
||||
saveLogs = false
|
||||
@@ -53,7 +57,7 @@ func TestDERPIntegrationTestSuite(t *testing.T) {
|
||||
s := new(IntegrationDERPTestSuite)
|
||||
|
||||
s.tailscales = make(map[string]dockertest.Resource)
|
||||
s.networks = make(map[int]dockertest.Network)
|
||||
s.containerNetworks = make(map[int]dockertest.Network)
|
||||
s.saveLogs = saveLogs
|
||||
|
||||
suite.Run(t, s)
|
||||
@@ -78,7 +82,7 @@ func TestDERPIntegrationTestSuite(t *testing.T) {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
for _, network := range s.networks {
|
||||
for _, network := range s.containerNetworks {
|
||||
if err := network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
@@ -93,9 +97,15 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
|
||||
}
|
||||
|
||||
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
|
||||
}
|
||||
s.network = network
|
||||
|
||||
for i := 0; i < totalContainers; i++ {
|
||||
if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
|
||||
s.networks[i] = *pnetwork
|
||||
s.containerNetworks[i] = *pnetwork
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
|
||||
}
|
||||
@@ -112,7 +122,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: headscaleHostname,
|
||||
Name: headscaleDerpHostname,
|
||||
Mounts: []string{
|
||||
fmt.Sprintf(
|
||||
"%s/integration_test/etc_embedded_derp:/etc/headscale",
|
||||
@@ -120,6 +130,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
),
|
||||
},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
ExposedPorts: []string{"8443/tcp", "3478/udp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8443/tcp": {{HostPort: "8443"}},
|
||||
@@ -127,7 +138,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
},
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(headscaleHostname)
|
||||
err = s.pool.RemoveContainerByName(headscaleDerpHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
@@ -153,13 +164,15 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
hostname, container := s.tailscaleContainer(
|
||||
fmt.Sprint(i),
|
||||
version,
|
||||
s.networks[i],
|
||||
s.containerNetworks[i],
|
||||
)
|
||||
s.tailscales[hostname] = *container
|
||||
}
|
||||
|
||||
log.Println("Waiting for headscale to be ready for embedded DERP tests")
|
||||
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8443/tcp"))
|
||||
hostEndpoint := fmt.Sprintf("%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("8443/tcp"))
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("https://%s/health", hostEndpoint)
|
||||
@@ -187,7 +200,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
log.Println("headscale container is ready for embedded DERP tests")
|
||||
|
||||
log.Printf("Creating headscale namespace: %s\n", namespaceName)
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "namespaces", "create", namespaceName},
|
||||
[]string{},
|
||||
@@ -196,7 +209,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Creating pre auth key for %s\n", namespaceName)
|
||||
preAuthResult, err := ExecuteCommand(
|
||||
preAuthResult, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
@@ -259,7 +272,7 @@ func (s *IntegrationDERPTestSuite) Join(
|
||||
|
||||
log.Println("Join command:", command)
|
||||
log.Printf("Running join command for %s\n", hostname)
|
||||
_, err := ExecuteCommand(
|
||||
_, _, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
@@ -320,7 +333,7 @@ func (s *IntegrationDERPTestSuite) TearDownSuite() {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
for _, network := range s.networks {
|
||||
for _, network := range s.containerNetworks {
|
||||
if err := network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
@@ -414,7 +427,7 @@ func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
|
||||
peername,
|
||||
)
|
||||
log.Println(command)
|
||||
result, err := ExecuteCommand(
|
||||
result, _, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
@@ -428,7 +441,9 @@ func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TestDERPSTUN() {
|
||||
headscaleSTUNAddr := fmt.Sprintf("localhost:%s", s.headscale.GetPort("3478/udp"))
|
||||
headscaleSTUNAddr := fmt.Sprintf("%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("3478/udp"))
|
||||
client := stun.NewClient()
|
||||
client.SetVerbose(true)
|
||||
client.SetVVerbose(true)
|
||||
|
||||
@@ -1,759 +0,0 @@
|
||||
//go:build integration_general
|
||||
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
)
|
||||
|
||||
type IntegrationTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
network dockertest.Network
|
||||
headscale dockertest.Resource
|
||||
saveLogs bool
|
||||
|
||||
namespaces map[string]TestNamespace
|
||||
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestIntegrationTestSuite(t *testing.T) {
|
||||
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
|
||||
if err != nil {
|
||||
saveLogs = false
|
||||
}
|
||||
|
||||
s := new(IntegrationTestSuite)
|
||||
|
||||
s.namespaces = map[string]TestNamespace{
|
||||
"thisspace": {
|
||||
count: 10,
|
||||
tailscales: make(map[string]dockertest.Resource),
|
||||
},
|
||||
"otherspace": {
|
||||
count: 2,
|
||||
tailscales: make(map[string]dockertest.Resource),
|
||||
},
|
||||
}
|
||||
s.saveLogs = saveLogs
|
||||
|
||||
suite.Run(t, s)
|
||||
|
||||
// HandleStats, which allows us to check if we passed and save logs
|
||||
// is called after TearDown, so we cannot tear down containers before
|
||||
// we have potentially saved the logs.
|
||||
if s.saveLogs {
|
||||
for _, scales := range s.namespaces {
|
||||
for _, tailscale := range scales.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !s.stats.Passed() {
|
||||
err := s.saveLog(&s.headscale, "test_output")
|
||||
if err != nil {
|
||||
log.Printf("Could not save log: %s\n", err)
|
||||
}
|
||||
}
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) saveLog(
|
||||
resource *dockertest.Resource,
|
||||
basePath string,
|
||||
) error {
|
||||
err := os.MkdirAll(basePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
err = s.pool.Client.Logs(
|
||||
docker.LogsOptions{
|
||||
Context: context.TODO(),
|
||||
Container: resource.Container.ID,
|
||||
OutputStream: &stdout,
|
||||
ErrorStream: &stderr,
|
||||
Tail: "all",
|
||||
RawTerminal: false,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
Follow: false,
|
||||
Timestamps: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
|
||||
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stdout.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stderr.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) Join(
|
||||
endpoint, key, hostname string,
|
||||
tailscale dockertest.Resource,
|
||||
) {
|
||||
defer s.joinWaitGroup.Done()
|
||||
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"up",
|
||||
"-login-server",
|
||||
endpoint,
|
||||
"--authkey",
|
||||
key,
|
||||
"--hostname",
|
||||
hostname,
|
||||
}
|
||||
|
||||
log.Println("Join command:", command)
|
||||
log.Printf("Running join command for %s\n", hostname)
|
||||
_, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
log.Printf("%s joined\n", hostname)
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) tailscaleContainer(
|
||||
namespace, identifier, version string,
|
||||
) (string, *dockertest.Resource) {
|
||||
tailscaleBuildOptions := getDockerBuildOptions(version)
|
||||
|
||||
hostname := fmt.Sprintf(
|
||||
"%s-tailscale-%s-%s",
|
||||
namespace,
|
||||
strings.Replace(version, ".", "-", -1),
|
||||
identifier,
|
||||
)
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Cmd: []string{
|
||||
"tailscaled", "--tun=tsdev",
|
||||
},
|
||||
}
|
||||
|
||||
pts, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
tailscaleBuildOptions,
|
||||
tailscaleOptions,
|
||||
DockerRestartPolicy,
|
||||
DockerAllowLocalIPv6,
|
||||
DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not start tailscale container version %s: %s", version, err)
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
return hostname, pts
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) SetupSuite() {
|
||||
var err error
|
||||
app = Headscale{
|
||||
dbType: "sqlite3",
|
||||
dbString: "integration_test_db.sqlite3",
|
||||
}
|
||||
|
||||
if ppool, err := dockertest.NewPool(""); err == nil {
|
||||
s.pool = *ppool
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
|
||||
}
|
||||
|
||||
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
|
||||
s.network = *pnetwork
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile",
|
||||
ContextDir: ".",
|
||||
}
|
||||
|
||||
currentPath, err := os.Getwd()
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: "headscale",
|
||||
Mounts: []string{
|
||||
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
|
||||
},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(headscaleHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not remove existing container before building test: %s",
|
||||
err,
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
log.Println("Creating headscale container for core integration tests")
|
||||
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
|
||||
s.headscale = *pheadscale
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not start headscale container for core integration tests: %s", err), "")
|
||||
}
|
||||
log.Println("Created headscale container for core integration tests")
|
||||
|
||||
log.Println("Creating tailscale containers for core integration tests")
|
||||
for namespace, scales := range s.namespaces {
|
||||
for i := 0; i < scales.count; i++ {
|
||||
version := tailscaleVersions[i%len(tailscaleVersions)]
|
||||
|
||||
hostname, container := s.tailscaleContainer(
|
||||
namespace,
|
||||
fmt.Sprint(i),
|
||||
version,
|
||||
)
|
||||
scales.tailscales[hostname] = *container
|
||||
}
|
||||
}
|
||||
|
||||
log.Println("Waiting for headscale to be ready for core integration tests")
|
||||
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("http://%s/health", hostEndpoint)
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
fmt.Printf("headscale for core integration test is not ready: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("status code not OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
|
||||
// test setup, we need to abort and tear down. However, testify does not seem to
|
||||
// support that at the moment:
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
log.Println("headscale container is ready for core integration tests")
|
||||
|
||||
for namespace, scales := range s.namespaces {
|
||||
log.Printf("Creating headscale namespace: %s\n", namespace)
|
||||
result, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "namespaces", "create", namespace},
|
||||
[]string{},
|
||||
)
|
||||
log.Println("headscale create namespace result: ", result)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Creating pre auth key for %s\n", namespace)
|
||||
preAuthResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"--namespace",
|
||||
namespace,
|
||||
"preauthkeys",
|
||||
"create",
|
||||
"--reusable",
|
||||
"--expiration",
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
[]string{"LOG_LEVEL=error"},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
|
||||
assert.Nil(s.T(), err)
|
||||
assert.True(s.T(), preAuthKey.Reusable)
|
||||
|
||||
headscaleEndpoint := "http://headscale:8080"
|
||||
|
||||
log.Printf(
|
||||
"Joining tailscale containers to headscale at %s\n",
|
||||
headscaleEndpoint,
|
||||
)
|
||||
for hostname, tailscale := range scales.tailscales {
|
||||
s.joinWaitGroup.Add(1)
|
||||
go s.Join(headscaleEndpoint, preAuthKey.Key, hostname, tailscale)
|
||||
}
|
||||
|
||||
s.joinWaitGroup.Wait()
|
||||
}
|
||||
|
||||
// The nodes need a bit of time to get their updated maps from headscale
|
||||
// TODO: See if we can have a more deterministic wait here.
|
||||
time.Sleep(60 * time.Second)
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) TearDownSuite() {
|
||||
if !s.saveLogs {
|
||||
for _, scales := range s.namespaces {
|
||||
for _, tailscale := range scales.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) HandleStats(
|
||||
suiteName string,
|
||||
stats *suite.SuiteInformation,
|
||||
) {
|
||||
s.stats = stats
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) TestListNodes() {
|
||||
for namespace, scales := range s.namespaces {
|
||||
log.Println("Listing nodes")
|
||||
result, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "--namespace", namespace, "nodes", "list"},
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("List nodes: \n%s\n", result)
|
||||
|
||||
// Chck that the correct count of host is present in node list
|
||||
lines := strings.Split(result, "\n")
|
||||
assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)
|
||||
|
||||
for hostname := range scales.tailscales {
|
||||
assert.Contains(s.T(), result, hostname)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) TestGetIpAddresses() {
|
||||
for _, scales := range s.namespaces {
|
||||
ips, err := getIPs(scales.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for hostname := range scales.tailscales {
|
||||
ips := ips[hostname]
|
||||
for _, ip := range ips {
|
||||
s.T().Run(hostname, func(t *testing.T) {
|
||||
assert.NotNil(t, ip)
|
||||
|
||||
log.Printf("IP for %s: %s\n", hostname, ip)
|
||||
|
||||
// c.Assert(ip.Valid(), check.IsTrue)
|
||||
assert.True(t, ip.Is4() || ip.Is6())
|
||||
switch {
|
||||
case ip.Is4():
|
||||
assert.True(t, IpPrefix4.Contains(ip))
|
||||
case ip.Is6():
|
||||
assert.True(t, IpPrefix6.Contains(ip))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(kradalby): fix this test
|
||||
// We need some way to import ipnstate.Status from multiple go packages.
|
||||
// Currently it will only work with 1.18.x since that is the last
|
||||
// version we have in go.mod
|
||||
// func (s *IntegrationTestSuite) TestStatus() {
|
||||
// for _, scales := range s.namespaces {
|
||||
// ips, err := getIPs(scales.tailscales)
|
||||
// assert.Nil(s.T(), err)
|
||||
//
|
||||
// for hostname, tailscale := range scales.tailscales {
|
||||
// s.T().Run(hostname, func(t *testing.T) {
|
||||
// command := []string{"tailscale", "status", "--json"}
|
||||
//
|
||||
// log.Printf("Getting status for %s\n", hostname)
|
||||
// result, err := ExecuteCommand(
|
||||
// &tailscale,
|
||||
// command,
|
||||
// []string{},
|
||||
// )
|
||||
// assert.Nil(t, err)
|
||||
//
|
||||
// var status ipnstate.Status
|
||||
// err = json.Unmarshal([]byte(result), &status)
|
||||
// assert.Nil(s.T(), err)
|
||||
//
|
||||
// // TODO(kradalby): Replace this check with peer length of SAME namespace
|
||||
// // Check if we have as many nodes in status
|
||||
// // as we have IPs/tailscales
|
||||
// // lines := strings.Split(result, "\n")
|
||||
// // assert.Equal(t, len(ips), len(lines)-1)
|
||||
// // assert.Equal(t, len(scales.tailscales), len(lines)-1)
|
||||
//
|
||||
// peerIps := getIPsfromIPNstate(status)
|
||||
//
|
||||
// // Check that all hosts is present in all hosts status
|
||||
// for ipHostname, ip := range ips {
|
||||
// if hostname != ipHostname {
|
||||
// assert.Contains(t, peerIps, ip)
|
||||
// }
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
|
||||
ips := make([]netaddr.IP, 0)
|
||||
|
||||
for _, peer := range status.Peer {
|
||||
ips = append(ips, peer.TailscaleIPs...)
|
||||
}
|
||||
|
||||
return ips
|
||||
}
|
||||
|
||||
// TODO: Adopt test for cross communication between namespaces
|
||||
func (s *IntegrationTestSuite) TestPingAllPeersByAddress() {
|
||||
for _, scales := range s.namespaces {
|
||||
ips, err := getIPs(scales.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for hostname, tailscale := range scales.tailscales {
|
||||
for peername, peerIPs := range ips {
|
||||
for i, ip := range peerIPs {
|
||||
// We currently cant ping ourselves, so skip that.
|
||||
if peername == hostname {
|
||||
continue
|
||||
}
|
||||
s.T().
|
||||
Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
|
||||
// We are only interested in "direct ping" which means what we
|
||||
// might need a couple of more attempts before reaching the node.
|
||||
command := []string{
|
||||
"tailscale", "ping",
|
||||
"--timeout=1s",
|
||||
"--c=10",
|
||||
"--until-direct=true",
|
||||
ip.String(),
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"Pinging from %s to %s (%s)\n",
|
||||
hostname,
|
||||
peername,
|
||||
ip,
|
||||
)
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf("Result for %s: %s\n", hostname, result)
|
||||
assert.Contains(t, result, "pong")
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) TestTailDrop() {
|
||||
for _, scales := range s.namespaces {
|
||||
ips, err := getIPs(scales.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
retry := func(times int, sleepInverval time.Duration, doWork func() error) (err error) {
|
||||
for attempts := 0; attempts < times; attempts++ {
|
||||
err = doWork()
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(sleepInverval)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
for hostname, tailscale := range scales.tailscales {
|
||||
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", hostname)}
|
||||
_, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
for peername := range ips {
|
||||
if peername == hostname {
|
||||
continue
|
||||
}
|
||||
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
|
||||
command := []string{
|
||||
"tailscale", "file", "cp",
|
||||
fmt.Sprintf("/tmp/file_from_%s", hostname),
|
||||
fmt.Sprintf("%s:", ips[peername][1]),
|
||||
}
|
||||
retry(10, 1*time.Second, func() error {
|
||||
log.Printf(
|
||||
"Sending file from %s to %s\n",
|
||||
hostname,
|
||||
peername,
|
||||
)
|
||||
_, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
ExecuteCommandTimeout(60*time.Second),
|
||||
)
|
||||
return err
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for hostname, tailscale := range scales.tailscales {
|
||||
command := []string{
|
||||
"tailscale", "file",
|
||||
"get",
|
||||
"/tmp/",
|
||||
}
|
||||
_, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
for peername, ip := range ips {
|
||||
if peername == hostname {
|
||||
continue
|
||||
}
|
||||
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
|
||||
command := []string{
|
||||
"ls",
|
||||
fmt.Sprintf("/tmp/file_from_%s", peername),
|
||||
}
|
||||
log.Printf(
|
||||
"Checking file in %s (%s) from %s (%s)\n",
|
||||
hostname,
|
||||
ips[hostname][1],
|
||||
peername,
|
||||
ip,
|
||||
)
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf("Result for %s: %s\n", peername, result)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("/tmp/file_from_%s\n", peername),
|
||||
result,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) TestPingAllPeersByHostname() {
|
||||
hostnames, err := getMagicFQDN(&s.headscale)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Resolved hostnames: %#v", hostnames)
|
||||
for _, scales := range s.namespaces {
|
||||
for hostname, tailscale := range scales.tailscales {
|
||||
for _, peername := range hostnames {
|
||||
if strings.Contains(peername, hostname) {
|
||||
continue
|
||||
}
|
||||
|
||||
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
|
||||
command := []string{
|
||||
"tailscale", "ping",
|
||||
"--timeout=10s",
|
||||
"--c=20",
|
||||
"--until-direct=true",
|
||||
peername,
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"Pinging using hostname from %s to %s\n",
|
||||
hostname,
|
||||
peername,
|
||||
)
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf("Result for %s: %s\n", hostname, result)
|
||||
assert.Contains(t, result, "pong")
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationTestSuite) TestMagicDNS() {
|
||||
hostnames, err := getMagicFQDN(&s.headscale)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Resolved hostnames: %#v", hostnames)
|
||||
|
||||
for _, scales := range s.namespaces {
|
||||
ips, err := getIPs(scales.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
for hostname, tailscale := range scales.tailscales {
|
||||
for _, peername := range hostnames {
|
||||
if strings.Contains(peername, hostname) {
|
||||
continue
|
||||
}
|
||||
|
||||
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
|
||||
command := []string{
|
||||
"tailscale", "ip", peername,
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"Resolving name %s from %s\n",
|
||||
peername,
|
||||
hostname,
|
||||
)
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf("Result for %s: %s\n", hostname, result)
|
||||
|
||||
peerBaseName := peername[:len(peername)-MachineGivenNameHashLength-1]
|
||||
expectedAddresses := ips[peerBaseName]
|
||||
for _, ip := range expectedAddresses {
|
||||
assert.Contains(t, result, ip.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getAPIURLs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[netaddr.IP]string, error) {
|
||||
fts := make(map[netaddr.IP]string)
|
||||
for _, tailscale := range tailscales {
|
||||
command := []string{
|
||||
"curl",
|
||||
"--unix-socket",
|
||||
"/run/tailscale/tailscaled.sock",
|
||||
"http://localhost/localapi/v0/file-targets",
|
||||
}
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pft []apitype.FileTarget
|
||||
if err := json.Unmarshal([]byte(result), &pft); err != nil {
|
||||
return nil, fmt.Errorf("invalid JSON: %w", err)
|
||||
}
|
||||
for _, ft := range pft {
|
||||
n := ft.Node
|
||||
for _, a := range n.Addresses { // just add all the addresses
|
||||
if _, ok := fts[a.IP()]; !ok {
|
||||
if ft.PeerAPIURL == "" {
|
||||
return nil, errors.New("api url is empty")
|
||||
}
|
||||
fts[a.IP()] = ft.PeerAPIURL
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fts, nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ cli:
|
||||
insecure: false
|
||||
timeout: 5s
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
db_ssl: false
|
||||
db_type: sqlite3
|
||||
derp:
|
||||
auto_update_enabled: false
|
||||
@@ -14,10 +15,12 @@ derp:
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
dns_config:
|
||||
override_local_dns: true
|
||||
base_domain: headscale.net
|
||||
domains: []
|
||||
magic_dns: true
|
||||
nameservers:
|
||||
- 127.0.0.11
|
||||
- 1.1.1.1
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
node_update_check_interval: 10s
|
||||
@@ -27,19 +30,23 @@ ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
listen_addr: 0.0.0.0:18080
|
||||
log_level: disabled
|
||||
log:
|
||||
level: disabled
|
||||
format: text
|
||||
logtail:
|
||||
enabled: false
|
||||
metrics_listen_addr: 127.0.0.1:19090
|
||||
oidc:
|
||||
only_start_if_oidc_is_available: true
|
||||
scope:
|
||||
- openid
|
||||
- profile
|
||||
- email
|
||||
strip_email_domain: true
|
||||
private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://headscale:18080
|
||||
tls_client_auth_mode: relaxed
|
||||
tls_letsencrypt_cache_dir: /var/www/.cache
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
unix_socket: /var/run/headscale.sock
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
log_level: trace
|
||||
log:
|
||||
level: trace
|
||||
acl_policy_path: ""
|
||||
db_type: sqlite3
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
@@ -7,13 +8,18 @@ ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
dns_config:
|
||||
override_local_dns: true
|
||||
base_domain: headscale.net
|
||||
magic_dns: true
|
||||
domains: []
|
||||
nameservers:
|
||||
- 127.0.0.11
|
||||
- 1.1.1.1
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
db_ssl: false
|
||||
private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
listen_addr: 0.0.0.0:18080
|
||||
metrics_listen_addr: 127.0.0.1:19090
|
||||
server_url: http://headscale:18080
|
||||
|
||||
integration_test/etc/alt-env-config.dump.gold.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
acl_policy_path: ""
|
||||
cli:
|
||||
insecure: false
|
||||
timeout: 5s
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
db_ssl: false
|
||||
db_type: sqlite3
|
||||
derp:
|
||||
auto_update_enabled: false
|
||||
server:
|
||||
enabled: false
|
||||
stun:
|
||||
enabled: true
|
||||
update_frequency: 1m
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
dns_config:
|
||||
override_local_dns: true
|
||||
base_domain: headscale.net
|
||||
domains: []
|
||||
magic_dns: true
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
node_update_check_interval: 30s
|
||||
grpc_allow_insecure: false
|
||||
grpc_listen_addr: :50443
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
listen_addr: 0.0.0.0:18080
|
||||
log:
|
||||
level: disabled
|
||||
format: text
|
||||
logtail:
|
||||
enabled: false
|
||||
metrics_listen_addr: 127.0.0.1:19090
|
||||
oidc:
|
||||
only_start_if_oidc_is_available: true
|
||||
scope:
|
||||
- openid
|
||||
- profile
|
||||
- email
|
||||
strip_email_domain: true
|
||||
private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://headscale:18080
|
||||
tls_letsencrypt_cache_dir: /var/www/.cache
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
unix_socket: /var/run/headscale.sock
|
||||
unix_socket_permission: "0o770"
|
||||
randomize_client_port: false
|
||||
integration_test/etc/alt-env-config.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
log:
|
||||
level: trace
|
||||
acl_policy_path: ""
|
||||
db_type: sqlite3
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
node_update_check_interval: 30s
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
dns_config:
|
||||
override_local_dns: true
|
||||
base_domain: headscale.net
|
||||
magic_dns: true
|
||||
domains: []
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
db_ssl: false
|
||||
private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
listen_addr: 0.0.0.0:18080
|
||||
metrics_listen_addr: 127.0.0.1:19090
|
||||
server_url: http://headscale:18080
|
||||
|
||||
derp:
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
auto_update_enabled: false
|
||||
update_frequency: 1m
|
||||
@@ -3,6 +3,7 @@ cli:
|
||||
insecure: false
|
||||
timeout: 5s
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
db_ssl: false
|
||||
db_type: sqlite3
|
||||
derp:
|
||||
auto_update_enabled: false
|
||||
@@ -14,10 +15,12 @@ derp:
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
dns_config:
|
||||
override_local_dns: true
|
||||
base_domain: headscale.net
|
||||
domains: []
|
||||
magic_dns: true
|
||||
nameservers:
|
||||
- 127.0.0.11
|
||||
- 1.1.1.1
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
node_update_check_interval: 10s
|
||||
@@ -27,19 +30,23 @@ ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
listen_addr: 0.0.0.0:8080
|
||||
log_level: disabled
|
||||
log:
|
||||
format: text
|
||||
level: disabled
|
||||
logtail:
|
||||
enabled: false
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
oidc:
|
||||
only_start_if_oidc_is_available: true
|
||||
scope:
|
||||
- openid
|
||||
- profile
|
||||
- email
|
||||
strip_email_domain: true
|
||||
private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://headscale:8080
|
||||
tls_client_auth_mode: relaxed
|
||||
tls_letsencrypt_cache_dir: /var/www/.cache
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
unix_socket: /var/run/headscale.sock
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
log_level: trace
|
||||
log:
|
||||
level: trace
|
||||
acl_policy_path: ""
|
||||
db_type: sqlite3
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
@@ -7,13 +8,17 @@ ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
dns_config:
|
||||
override_local_dns: true
|
||||
base_domain: headscale.net
|
||||
magic_dns: true
|
||||
domains: []
|
||||
nameservers:
|
||||
- 127.0.0.11
|
||||
- 1.1.1.1
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
listen_addr: 0.0.0.0:8080
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
server_url: http://headscale:8080
|
||||
|
||||
@@ -14,8 +14,10 @@ dns_config:
|
||||
- 1.1.1.1
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
private_key_path: private.key
|
||||
listen_addr: 0.0.0.0:8443
|
||||
server_url: https://headscale:8443
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
listen_addr: 0.0.0.0:443
|
||||
server_url: https://headscale:443
|
||||
tls_cert_path: "/etc/headscale/tls/server.crt"
|
||||
tls_key_path: "/etc/headscale/tls/server.key"
|
||||
tls_client_auth_mode: disabled
|
||||
|
||||
integration_test/etc_oidc/base_config.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
  - fd7a:115c:a1e0::/48
  - 100.64.0.0/10
db_path: /tmp/integration_test_db.sqlite3
private_key_path: private.key
noise:
  private_key_path: noise_private.key
listen_addr: 0.0.0.0:8443
server_url: https://headscale-oidc:8443
tls_cert_path: "/etc/headscale/tls/server.crt"
tls_key_path: "/etc/headscale/tls/server.key"
derp:
  urls:
    - https://controlplane.tailscale.com/derpmap/default
  auto_update_enabled: true
  update_frequency: 1m

Some files were not shown because too many files have changed in this diff.